Fresh dep ensure
16 vendor/k8s.io/kubernetes/test/integration/BUILD generated vendored
@@ -13,14 +13,14 @@ go_library(
    ],
    importpath = "k8s.io/kubernetes/test/integration",
    deps = [
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/github.com/coreos/etcd/clientv3:go_default_library",
        "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
    ],
)

@@ -35,14 +35,17 @@ filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//test/integration/apimachinery:all-srcs",
        "//test/integration/apiserver:all-srcs",
        "//test/integration/auth:all-srcs",
        "//test/integration/benchmark/jsonify:all-srcs",
        "//test/integration/client:all-srcs",
        "//test/integration/configmap:all-srcs",
        "//test/integration/cronjob:all-srcs",
        "//test/integration/daemonset:all-srcs",
        "//test/integration/defaulttolerationseconds:all-srcs",
        "//test/integration/deployment:all-srcs",
        "//test/integration/dryrun:all-srcs",
        "//test/integration/etcd:all-srcs",
        "//test/integration/evictions:all-srcs",
        "//test/integration/examples:all-srcs",
@@ -62,6 +65,7 @@ filegroup(
        "//test/integration/scheduler_perf:all-srcs",
        "//test/integration/secrets:all-srcs",
        "//test/integration/serviceaccount:all-srcs",
        "//test/integration/serving:all-srcs",
        "//test/integration/statefulset:all-srcs",
        "//test/integration/storageclasses:all-srcs",
        "//test/integration/tls:all-srcs",
38 vendor/k8s.io/kubernetes/test/integration/apimachinery/BUILD generated vendored Normal file
@@ -0,0 +1,38 @@
load("@io_bazel_rules_go//go:def.bzl", "go_test")

go_test(
    name = "go_default_test",
    srcs = [
        "main_test.go",
        "watch_restart_test.go",
    ],
    deps = [
        "//pkg/api/testapi:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
        "//test/integration/framework:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
27 vendor/k8s.io/kubernetes/test/integration/apimachinery/main_test.go generated vendored Normal file
@@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apimachinery

import (
    "testing"

    "k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
    framework.EtcdMain(m.Run)
}
258 vendor/k8s.io/kubernetes/test/integration/apimachinery/watch_restart_test.go generated vendored Normal file
@@ -0,0 +1,258 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apimachinery

import (
    "context"
    "fmt"
    "reflect"
    "testing"
    "time"

    "k8s.io/api/core/v1"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/cache"
    watchtools "k8s.io/client-go/tools/watch"
    "k8s.io/kubernetes/pkg/api/testapi"
    "k8s.io/kubernetes/test/integration/framework"
)

func noopNormalization(output []string) []string {
    return output
}

func normalizeInformerOutputFunc(initialVal string) func(output []string) []string {
    return func(output []string) []string {
        result := make([]string, 0, len(output))

        // Removes initial value and all of its direct repetitions
        lastVal := initialVal
        for _, v := range output {
            // Make values unique as informer(List+Watch) duplicates some events
            if v == lastVal {
                continue
            }
            result = append(result, v)
            lastVal = v
        }

        return result
    }
}

func TestWatchRestartsIfTimeoutNotReached(t *testing.T) {
    // Has to be longer than 5 seconds
    timeout := 2 * time.Minute

    // Set up a master
    masterConfig := framework.NewIntegrationTestMasterConfig()
    // Timeout is set random between MinRequestTimeout and 2x
    masterConfig.GenericConfig.MinRequestTimeout = int(timeout.Seconds()) / 4
    _, s, closeFn := framework.RunAMaster(masterConfig)
    defer closeFn()

    config := &restclient.Config{
        Host:          s.URL,
        ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[corev1.GroupName].GroupVersion()},
    }

    namespaceObject := framework.CreateTestingNamespace("retry-watch", s, t)
    defer framework.DeleteTestingNamespace(namespaceObject, s, t)

    getListFunc := func(c *kubernetes.Clientset, secret *v1.Secret) func(options metav1.ListOptions) *v1.SecretList {
        return func(options metav1.ListOptions) *v1.SecretList {
            options.FieldSelector = fields.OneTermEqualSelector("metadata.name", secret.Name).String()
            res, err := c.CoreV1().Secrets(secret.Namespace).List(options)
            if err != nil {
                t.Fatalf("Failed to list Secrets: %v", err)
            }
            return res
        }
    }

    getWatchFunc := func(c *kubernetes.Clientset, secret *v1.Secret) func(options metav1.ListOptions) (watch.Interface, error) {
        return func(options metav1.ListOptions) (watch.Interface, error) {
            options.FieldSelector = fields.OneTermEqualSelector("metadata.name", secret.Name).String()
            res, err := c.CoreV1().Secrets(secret.Namespace).Watch(options)
            if err != nil {
                t.Fatalf("Failed to create a watcher on Secrets: %v", err)
            }
            return res, err
        }
    }

    generateEvents := func(t *testing.T, c *kubernetes.Clientset, secret *v1.Secret, referenceOutput *[]string, stopChan chan struct{}, stoppedChan chan struct{}) {
        defer close(stoppedChan)
        counter := 0

        // These 5 seconds are here to protect against a race at the end when we could write something there at the same time as watch.Until ends
        softTimeout := timeout - 5*time.Second
        if softTimeout < 0 {
            panic("Timeout has to be greater than 5 seconds!")
        }
        endChannel := time.After(softTimeout)
        for {
            select {
            // TODO: get this lower once we figure out how to extend ETCD cache
            case <-time.After(1000 * time.Millisecond):
                counter = counter + 1

                patch := fmt.Sprintf(`{"metadata": {"annotations": {"count": "%d"}}}`, counter)
                _, err := c.CoreV1().Secrets(secret.Namespace).Patch(secret.Name, types.StrategicMergePatchType, []byte(patch))
                if err != nil {
                    t.Fatalf("Failed to patch secret: %v", err)
                }

                *referenceOutput = append(*referenceOutput, fmt.Sprintf("%d", counter))
            case <-endChannel:
                return
            case <-stopChan:
                return
            }
        }
    }

    initialCount := "0"
    newTestSecret := func(name string) *v1.Secret {
        return &v1.Secret{
            ObjectMeta: metav1.ObjectMeta{
                Name:      name,
                Namespace: namespaceObject.Name,
                Annotations: map[string]string{
                    "count": initialCount,
                },
            },
            Data: map[string][]byte{
                "data": []byte("value1\n"),
            },
        }
    }

    tt := []struct {
        name                string
        succeed             bool
        secret              *v1.Secret
        getWatcher          func(c *kubernetes.Clientset, secret *v1.Secret) (watch.Interface, error)
        normalizeOutputFunc func(referenceOutput []string) []string
    }{
        {
            name:    "regular watcher should fail",
            succeed: false,
            secret:  newTestSecret("secret-01"),
            getWatcher: func(c *kubernetes.Clientset, secret *v1.Secret) (watch.Interface, error) {
                options := metav1.ListOptions{
                    ResourceVersion: secret.ResourceVersion,
                }
                return getWatchFunc(c, secret)(options)
            }, // regular watcher; unfortunately destined to fail
            normalizeOutputFunc: noopNormalization,
        },
        {
            name:    "InformerWatcher survives closed watches",
            succeed: true,
            secret:  newTestSecret("secret-03"),
            getWatcher: func(c *kubernetes.Clientset, secret *v1.Secret) (watch.Interface, error) {
                lw := &cache.ListWatch{
                    ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
                        return getListFunc(c, secret)(options), nil
                    },
                    WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
                        return getWatchFunc(c, secret)(options)
                    },
                }
                _, _, w := watchtools.NewIndexerInformerWatcher(lw, &v1.Secret{})
                return w, nil
            },
            normalizeOutputFunc: normalizeInformerOutputFunc(initialCount),
        },
    }

    for _, tmptc := range tt {
        tc := tmptc // we need to copy it for parallel runs
        t.Run(tc.name, func(t *testing.T) {
            c, err := kubernetes.NewForConfig(config)
            if err != nil {
                t.Fatalf("Failed to create clientset: %v", err)
            }

            secret, err := c.CoreV1().Secrets(tc.secret.Namespace).Create(tc.secret)
            if err != nil {
                t.Fatalf("Failed to create testing secret %s/%s: %v", tc.secret.Namespace, tc.secret.Name, err)
            }

            watcher, err := tc.getWatcher(c, secret)
            if err != nil {
                t.Fatalf("Failed to create watcher: %v", err)
            }

            var referenceOutput []string
            var output []string
            stopChan := make(chan struct{})
            stoppedChan := make(chan struct{})
            go generateEvents(t, c, secret, &referenceOutput, stopChan, stoppedChan)

            // Record current time to be able to assess if the timeout has been reached
            startTime := time.Now()
            ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
            defer cancel()
            _, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
                s, ok := event.Object.(*v1.Secret)
                if !ok {
                    t.Fatalf("Received an object that is not a Secret: %#v", event.Object)
                }
                output = append(output, s.Annotations["count"])
                // Watch will never end voluntarily
                return false, nil
            })
            watchDuration := time.Since(startTime)
            close(stopChan)
            <-stoppedChan

            output = tc.normalizeOutputFunc(output)

            t.Logf("Watch duration: %v; timeout: %v", watchDuration, timeout)

            if err == nil && !tc.succeed {
                t.Fatalf("Watch should have timed out but it exited without an error!")
            }

            if err != wait.ErrWaitTimeout && tc.succeed {
                t.Fatalf("Watch exited with error: %v!", err)
            }

            if watchDuration < timeout && tc.succeed {
                t.Fatalf("Watch should have timed out after %v but it timed out prematurely after %v!", timeout, watchDuration)
            }

            if watchDuration >= timeout && !tc.succeed {
                t.Fatalf("Watch should have timed out but it succeeded!")
            }

            if tc.succeed && !reflect.DeepEqual(referenceOutput, output) {
                t.Fatalf("Reference and real output differ! We must have lost some events or read some multiple times!\nRef: %#v\nReal: %#v", referenceOutput, output)
            }
        })
    }
}
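
The new watch_restart_test.go above exercises client-go's informer-backed watcher. Below is a minimal consumer-side sketch of the same pattern, written against the 1.13-era client-go vendored here (the three-value return of NewIndexerInformerWatcher matches this vintage); the namespace handling and the choice of Secrets as the watched resource are illustrative assumptions, not part of the diff.

package watchexample

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
    watchtools "k8s.io/client-go/tools/watch"
)

// watchSecrets returns a watch.Interface that transparently survives
// server-side watch closures (e.g. MinRequestTimeout expiry) by letting an
// informer re-list and re-watch underneath.
func watchSecrets(c kubernetes.Interface, namespace string) watch.Interface {
    lw := &cache.ListWatch{
        // List seeds the informer; Watch is re-established whenever the
        // server closes the stream.
        ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
            return c.CoreV1().Secrets(namespace).List(options)
        },
        WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
            return c.CoreV1().Secrets(namespace).Watch(options)
        },
    }
    // In this client-go vintage the helper returns (indexer, controller, watcher).
    _, _, w := watchtools.NewIndexerInformerWatcher(lw, &v1.Secret{})
    return w
}

// consume drains events until the watcher is stopped.
func consume(w watch.Interface) {
    for event := range w.ResultChan() {
        if s, ok := event.Object.(*v1.Secret); ok {
            fmt.Println(event.Type, s.Name)
        }
    }
}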
49 vendor/k8s.io/kubernetes/test/integration/apiserver/BUILD generated vendored
@@ -23,36 +23,37 @@ go_test(
        "//pkg/api/testapi:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/kubectl/cmd/util:go_default_library",
        "//pkg/kubectl/genericclioptions:go_default_library",
        "//pkg/master:go_default_library",
        "//pkg/printers:go_default_library",
        "//pkg/printers/internalversion:go_default_library",
        "//staging/src/k8s.io/api/auditregistration/v1alpha1:go_default_library",
        "//staging/src/k8s.io/api/batch/v2alpha1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/rbac/v1alpha1:go_default_library",
        "//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/settings/v1alpha1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1alpha1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/features:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library",
        "//staging/src/k8s.io/client-go/discovery:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
        "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
        "//staging/src/k8s.io/client-go/tools/pager:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/pborman/uuid:go_default_library",
        "//vendor/k8s.io/api/batch/v2alpha1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/api/rbac/v1alpha1:go_default_library",
        "//vendor/k8s.io/api/scheduling/v1beta1:go_default_library",
        "//vendor/k8s.io/api/settings/v1alpha1:go_default_library",
        "//vendor/k8s.io/api/storage/v1alpha1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/features:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/discovery:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
        "//vendor/k8s.io/client-go/tools/pager:go_default_library",
        "//vendor/k8s.io/gengo/examples/set-gen/sets:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)
4 vendor/k8s.io/kubernetes/test/integration/apiserver/apiserver_test.go generated vendored
@@ -26,7 +26,6 @@ import (
    "reflect"
    "testing"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    "k8s.io/api/extensions/v1beta1"
    "k8s.io/apimachinery/pkg/api/meta"
@@ -38,6 +37,7 @@ import (
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/pager"
    "k8s.io/klog"
    "k8s.io/kubernetes/pkg/api/testapi"
    api "k8s.io/kubernetes/pkg/apis/core"
    "k8s.io/kubernetes/pkg/master"
@@ -68,7 +68,7 @@ func verifyStatusCode(t *testing.T, verb, URL, body string, expectedStatusCode i
        t.Fatalf("unexpected error: %v in sending req with verb: %s, URL: %s and body: %s", err, verb, URL, body)
    }
    transport := http.DefaultTransport
    glog.Infof("Sending request: %v", req)
    klog.Infof("Sending request: %v", req)
    resp, err := transport.RoundTrip(req)
    if err != nil {
        t.Fatalf("unexpected error: %v in req: %v", err, req)
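
This hunk swaps github.com/golang/glog for k8s.io/klog, which is call-compatible for the common logging entry points, so the migration is essentially an import change. A hedged sketch of what a caller needs on the klog side; the InitFlags wiring and verbosity value below are standard klog usage assumed for illustration, not something this diff contains.

package logexample

import (
    "flag"

    "k8s.io/klog"
)

func init() {
    // klog registers its flags (-v, -logtostderr, ...) on an explicit
    // FlagSet; passing nil uses flag.CommandLine.
    klog.InitFlags(nil)
    flag.Set("v", "2")
}

// logRequest mirrors the call shape of the replaced glog.Infof line.
func logRequest(url string) {
    klog.Infof("Sending request: %v", url)
    klog.V(8).Infof("verbose detail for %s", url)
}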
11 vendor/k8s.io/kubernetes/test/integration/apiserver/print_test.go generated vendored
@@ -26,6 +26,7 @@ import (
    "testing"
    "time"

    auditregv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
    batchv2alpha1 "k8s.io/api/batch/v2alpha1"
    rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
    schedulerapi "k8s.io/api/scheduling/v1beta1"
@@ -34,13 +35,13 @@ import (
    "k8s.io/apimachinery/pkg/api/meta"
    metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/cli-runtime/pkg/genericclioptions"
    "k8s.io/client-go/discovery"
    "k8s.io/client-go/tools/clientcmd"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
    "k8s.io/gengo/examples/set-gen/sets"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/pkg/kubectl/cmd/util"
    "k8s.io/kubernetes/pkg/kubectl/genericclioptions"
    "k8s.io/kubernetes/pkg/printers"
    printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
    "k8s.io/kubernetes/test/integration/framework"
@@ -55,6 +56,8 @@ var kindWhiteList = sets.NewString(
    "ExportOptions",
    "GetOptions",
    "ListOptions",
    "CreateOptions",
    "UpdateOptions",
    "NodeProxyOptions",
    "PodAttachOptions",
    "PodExecOptions",
@@ -111,10 +114,6 @@ var kindWhiteList = sets.NewString(
    "Eviction",
    // --

    // k8s.io/kubernetes/pkg/apis/componentconfig
    "KubeSchedulerConfiguration",
    // --

    // k8s.io/apimachinery/pkg/apis/meta
    "WatchEvent",
    "Status",
@@ -132,11 +131,13 @@ var missingHanlders = sets.NewString(
    "VolumeAttachment",
    "PriorityClass",
    "PodPreset",
    "AuditSink",
)

func TestServerSidePrint(t *testing.T) {
    s, _, closeFn := setup(t,
        // additional groupversions needed for the test to run
        auditregv1alpha1.SchemeGroupVersion,
        batchv2alpha1.SchemeGroupVersion,
        rbacv1alpha1.SchemeGroupVersion,
        settingsv1alpha1.SchemeGroupVersion,
83 vendor/k8s.io/kubernetes/test/integration/auth/BUILD generated vendored
@@ -17,23 +17,25 @@ go_test(
        "rbac_test.go",
        "svcaccttoken_test.go",
    ],
    data = [
        "//staging/src/k8s.io/csi-api/pkg/crd:csi-manifests",
    ],
    tags = ["integration"],
    deps = [
        "//cmd/kube-apiserver/app/testing:go_default_library",
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/api/testapi:go_default_library",
        "//pkg/apis/authorization:go_default_library",
        "//pkg/apis/autoscaling:go_default_library",
        "//pkg/apis/coordination:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/apis/extensions:go_default_library",
        "//pkg/apis/policy:go_default_library",
        "//pkg/apis/rbac:go_default_library",
        "//pkg/auth/authorizer/abac:go_default_library",
        "//pkg/auth/nodeidentifier:go_default_library",
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/client/informers/informers_generated/internalversion:go_default_library",
        "//pkg/controller/serviceaccount:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubeapiserver/authorizer:go_default_library",
        "//pkg/master:go_default_library",
        "//pkg/registry/rbac/clusterrole:go_default_library",
        "//pkg/registry/rbac/clusterrole/storage:go_default_library",
@@ -44,46 +46,53 @@ go_test(
        "//pkg/registry/rbac/rolebinding:go_default_library",
        "//pkg/registry/rbac/rolebinding/storage:go_default_library",
        "//pkg/serviceaccount:go_default_library",
        "//plugin/pkg/admission/noderestriction:go_default_library",
        "//plugin/pkg/auth/authenticator/token/bootstrap:go_default_library",
        "//plugin/pkg/auth/authorizer/rbac:go_default_library",
        "//staging/src/k8s.io/api/authentication/v1:go_default_library",
        "//staging/src/k8s.io/api/authentication/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
        "//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
        "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/group:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/request/bearertoken:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/token/cache:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/registry/generic:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
        "//staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/tokentest:go_default_library",
        "//staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/tools/clientcmd/api/v1:go_default_library",
        "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
        "//staging/src/k8s.io/client-go/transport:go_default_library",
        "//staging/src/k8s.io/client-go/util/cert:go_default_library",
        "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library",
        "//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library",
        "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
        "//test/e2e/lifecycle/bootstrap:go_default_library",
        "//test/integration:go_default_library",
        "//test/integration/etcd:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/gopkg.in/square/go-jose.v2/jwt:go_default_library",
        "//vendor/k8s.io/api/authentication/v1:go_default_library",
        "//vendor/k8s.io/api/authentication/v1beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/storage/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/group:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
        "//vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/tokentest:go_default_library",
        "//vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/bootstrap/token/api:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd/api/v1:go_default_library",
        "//vendor/k8s.io/client-go/transport:go_default_library",
        "//vendor/k8s.io/client-go/util/cert:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
        "//vendor/k8s.io/utils/pointer:go_default_library",
    ],
)
14 vendor/k8s.io/kubernetes/test/integration/auth/accessreview_test.go generated vendored
@@ -46,9 +46,11 @@ func (sarAuthorizer) Authorize(a authorizer.Attributes) (authorizer.Decision, st
    return authorizer.DecisionAllow, "you're not dave", nil
}

func alwaysAlice(req *http.Request) (user.Info, bool, error) {
    return &user.DefaultInfo{
        Name: "alice",
func alwaysAlice(req *http.Request) (*authenticator.Response, bool, error) {
    return &authenticator.Response{
        User: &user.DefaultInfo{
            Name: "alice",
        },
    }, true, nil
}

@@ -145,8 +147,10 @@ func TestSubjectAccessReview(t *testing.T) {
func TestSelfSubjectAccessReview(t *testing.T) {
    username := "alice"
    masterConfig := framework.NewIntegrationTestMasterConfig()
    masterConfig.GenericConfig.Authentication.Authenticator = authenticator.RequestFunc(func(req *http.Request) (user.Info, bool, error) {
        return &user.DefaultInfo{Name: username}, true, nil
    masterConfig.GenericConfig.Authentication.Authenticator = authenticator.RequestFunc(func(req *http.Request) (*authenticator.Response, bool, error) {
        return &authenticator.Response{
            User: &user.DefaultInfo{Name: username},
        }, true, nil
    })
    masterConfig.GenericConfig.Authorization.Authorizer = sarAuthorizer{}
    _, s, closeFn := framework.RunAMaster(masterConfig)
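
The hunk above adapts to the authenticator interface change in this Kubernetes vintage: request authenticators now return *authenticator.Response (which carries the user and leaves room for audiences) instead of a bare user.Info. A minimal sketch mirroring the test's alwaysAlice helper; the static username is an illustrative assumption.

package authexample

import (
    "net/http"

    "k8s.io/apiserver/pkg/authentication/authenticator"
    "k8s.io/apiserver/pkg/authentication/user"
)

// staticAuthenticator satisfies authenticator.Request via RequestFunc and
// authenticates every request as the given user.
func staticAuthenticator(name string) authenticator.Request {
    return authenticator.RequestFunc(func(req *http.Request) (*authenticator.Response, bool, error) {
        return &authenticator.Response{
            User: &user.DefaultInfo{Name: name},
        }, true, nil
    })
}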
7 vendor/k8s.io/kubernetes/test/integration/auth/auth_test.go generated vendored
@@ -40,6 +40,7 @@ import (
    "k8s.io/apiserver/pkg/authentication/group"
    "k8s.io/apiserver/pkg/authentication/request/bearertoken"
    "k8s.io/apiserver/pkg/authentication/serviceaccount"
    "k8s.io/apiserver/pkg/authentication/token/cache"
    "k8s.io/apiserver/pkg/authentication/user"
    "k8s.io/apiserver/pkg/authorization/authorizer"
    "k8s.io/apiserver/pkg/authorization/authorizerfactory"
@@ -84,11 +85,11 @@ func getTestWebhookTokenAuth(serverURL string) (authenticator.Request, error) {
    if err := json.NewEncoder(kubecfgFile).Encode(config); err != nil {
        return nil, err
    }
    webhookTokenAuth, err := webhook.New(kubecfgFile.Name(), 2*time.Minute)
    webhookTokenAuth, err := webhook.New(kubecfgFile.Name(), nil)
    if err != nil {
        return nil, err
    }
    return bearertoken.New(webhookTokenAuth), nil
    return bearertoken.New(cache.New(webhookTokenAuth, false, 2*time.Minute, 2*time.Minute)), nil
}

func path(resource, namespace, name string) string {
@@ -397,7 +398,7 @@ func getTestRequests(namespace string) []struct {
    return requests
}

// The TestAuthMode* tests tests a large number of URLs and checks that they
// The TestAuthMode* tests a large number of URLs and checks that they
// are FORBIDDEN or not, depending on the mode. They do not attempt to do
// detailed verification of behaviour beyond authorization. They are not
// fuzz tests.
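
The auth_test.go hunk above shows token caching moving out of the webhook authenticator and into an explicit cache.New wrapper. A hedged sketch of that composition; the kubeconfig path parameter is an assumption for illustration, and the TTL values mirror the test.

package authexample

import (
    "time"

    "k8s.io/apiserver/pkg/authentication/authenticator"
    "k8s.io/apiserver/pkg/authentication/request/bearertoken"
    "k8s.io/apiserver/pkg/authentication/token/cache"
    "k8s.io/apiserver/plugin/pkg/authenticator/token/webhook"
)

func cachedWebhookAuth(kubeconfigPath string) (authenticator.Request, error) {
    // The second webhook.New parameter is nil here, matching the updated
    // call in the diff (the built-in TTL argument is gone).
    tokenAuth, err := webhook.New(kubeconfigPath, nil)
    if err != nil {
        return nil, err
    }
    // cache.New(auth, cacheErrs, successTTL, failureTTL) memoizes results so
    // each bearer token is not re-validated against the webhook per request.
    cached := cache.New(tokenAuth, false, 2*time.Minute, 2*time.Minute)
    return bearertoken.New(cached), nil
}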
24 vendor/k8s.io/kubernetes/test/integration/auth/bootstraptoken_test.go generated vendored
@@ -24,24 +24,24 @@ import (
    "testing"
    "time"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apiserver/pkg/authentication/request/bearertoken"
    bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
    api "k8s.io/kubernetes/pkg/apis/core"
    bootstrapapi "k8s.io/cluster-bootstrap/token/api"
    "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap"
    bootstraputil "k8s.io/kubernetes/test/e2e/lifecycle/bootstrap"
    "k8s.io/kubernetes/test/integration"
    "k8s.io/kubernetes/test/integration/framework"
)

type bootstrapSecrets []*api.Secret
type bootstrapSecrets []*corev1.Secret

func (b bootstrapSecrets) List(selector labels.Selector) (ret []*api.Secret, err error) {
func (b bootstrapSecrets) List(selector labels.Selector) (ret []*corev1.Secret, err error) {
    return b, nil
}

func (b bootstrapSecrets) Get(name string) (*api.Secret, error) {
func (b bootstrapSecrets) Get(name string) (*corev1.Secret, error) {
    return b[0], nil
}

@@ -55,36 +55,36 @@ func TestBootstrapTokenAuth(t *testing.T) {
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    var bootstrapSecretValid = &api.Secret{
    var bootstrapSecretValid = &corev1.Secret{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: metav1.NamespaceSystem,
            Name:      bootstrapapi.BootstrapTokenSecretPrefix,
        },
        Type: api.SecretTypeBootstrapToken,
        Type: corev1.SecretTypeBootstrapToken,
        Data: map[string][]byte{
            bootstrapapi.BootstrapTokenIDKey:               []byte(tokenId),
            bootstrapapi.BootstrapTokenSecretKey:           []byte(secret),
            bootstrapapi.BootstrapTokenUsageAuthentication: []byte("true"),
        },
    }
    var bootstrapSecretInvalid = &api.Secret{
    var bootstrapSecretInvalid = &corev1.Secret{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: metav1.NamespaceSystem,
            Name:      bootstrapapi.BootstrapTokenSecretPrefix,
        },
        Type: api.SecretTypeBootstrapToken,
        Type: corev1.SecretTypeBootstrapToken,
        Data: map[string][]byte{
            bootstrapapi.BootstrapTokenIDKey:               []byte(tokenId),
            bootstrapapi.BootstrapTokenSecretKey:           []byte("invalid"),
            bootstrapapi.BootstrapTokenUsageAuthentication: []byte("true"),
        },
    }
    var expiredBootstrapToken = &api.Secret{
    var expiredBootstrapToken = &corev1.Secret{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: metav1.NamespaceSystem,
            Name:      bootstrapapi.BootstrapTokenSecretPrefix,
        },
        Type: api.SecretTypeBootstrapToken,
        Type: corev1.SecretTypeBootstrapToken,
        Data: map[string][]byte{
            bootstrapapi.BootstrapTokenIDKey:     []byte(tokenId),
            bootstrapapi.BootstrapTokenSecretKey: []byte("invalid"),
@@ -101,7 +101,7 @@ func TestBootstrapTokenAuth(t *testing.T) {
    tests := []struct {
        name    string
        request request
        secret  *api.Secret
        secret  *corev1.Secret
    }{
        {
            name: "valid token",
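
bootstraptoken_test.go now builds bootstrap-token secrets from the external corev1 types and the relocated k8s.io/cluster-bootstrap constants. A short sketch of that construction; the token ID and secret values, and the name concatenation with the secret prefix, are illustrative assumptions rather than exact test behavior.

package authexample

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    bootstrapapi "k8s.io/cluster-bootstrap/token/api"
)

// bootstrapTokenSecret assembles a bootstrap-token Secret in kube-system
// using the external types and constants the diff migrates to.
func bootstrapTokenSecret(tokenID, tokenSecret string) *corev1.Secret {
    return &corev1.Secret{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: metav1.NamespaceSystem,
            // Real bootstrap secrets are named "bootstrap-token-<id>".
            Name: bootstrapapi.BootstrapTokenSecretPrefix + tokenID,
        },
        Type: corev1.SecretTypeBootstrapToken,
        Data: map[string][]byte{
            bootstrapapi.BootstrapTokenIDKey:               []byte(tokenID),
            bootstrapapi.BootstrapTokenSecretKey:           []byte(tokenSecret),
            bootstrapapi.BootstrapTokenUsageAuthentication: []byte("true"),
        },
    }
}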
277 vendor/k8s.io/kubernetes/test/integration/auth/node_test.go generated vendored
@@ -18,45 +18,38 @@ package auth

import (
    "fmt"
    "net/http"
    "net/http/httptest"
    "io/ioutil"
    "strings"
    "testing"
    "time"

    storagev1beta1 "k8s.io/api/storage/v1beta1"
    apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/apiserver/pkg/authentication/request/bearertoken"
    "k8s.io/apiserver/pkg/authentication/token/tokenfile"
    "k8s.io/apiserver/pkg/authentication/user"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
    versionedinformers "k8s.io/client-go/informers"
    externalclientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    csiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1"
    csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned"
    kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/pkg/apis/coordination"
    "k8s.io/kubernetes/pkg/apis/core"
    api "k8s.io/kubernetes/pkg/apis/core"
    "k8s.io/kubernetes/pkg/apis/policy"
    "k8s.io/kubernetes/pkg/auth/nodeidentifier"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
    "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/kubeapiserver/authorizer"
    "k8s.io/kubernetes/plugin/pkg/admission/noderestriction"
    "k8s.io/kubernetes/test/integration/etcd"
    "k8s.io/kubernetes/test/integration/framework"
    "k8s.io/utils/pointer"
)

func TestNodeAuthorizer(t *testing.T) {
    // Start the server so we know the address
    h := &framework.MasterHolder{Initialized: make(chan struct{})}
    apiServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        <-h.Initialized
        h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
    }))

    const (
        // Define credentials
        tokenMaster = "master-token"
@@ -65,57 +58,44 @@ func TestNodeAuthorizer(t *testing.T) {
        tokenNode2 = "node2-token"
    )

    authenticator := bearertoken.New(tokenfile.New(map[string]*user.DefaultInfo{
        tokenMaster:      {Name: "admin", Groups: []string{"system:masters"}},
        tokenNodeUnknown: {Name: "unknown", Groups: []string{"system:nodes"}},
        tokenNode1:       {Name: "system:node:node1", Groups: []string{"system:nodes"}},
        tokenNode2:       {Name: "system:node:node2", Groups: []string{"system:nodes"}},
    }))

    // Build client config, clientset, and informers
    clientConfig := &restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}
    superuserClient, superuserClientExternal := clientsetForToken(tokenMaster, clientConfig)
    informerFactory := informers.NewSharedInformerFactory(superuserClient, time.Minute)
    versionedInformerFactory := versionedinformers.NewSharedInformerFactory(superuserClientExternal, time.Minute)

    // Enabled CSIPersistentVolume feature at startup so volumeattachments get watched
    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIPersistentVolume, true)()

    // Enable DynamicKubeletConfig feature so that Node.Spec.ConfigSource can be set
    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DynamicKubeletConfig, true)()

    // Set up Node+RBAC authorizer
    authorizerConfig := &authorizer.AuthorizationConfig{
        AuthorizationModes:       []string{"Node", "RBAC"},
        InformerFactory:          informerFactory,
        VersionedInformerFactory: versionedInformerFactory,
    }
    nodeRBACAuthorizer, _, err := authorizerConfig.New()
    // Enable NodeLease feature so that nodes can create leases
    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeLease, true)()

    // Enable CSINodeInfo feature so that nodes can create CSINodeInfo objects.
    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSINodeInfo, true)()

    tokenFile, err := ioutil.TempFile("", "kubeconfig")
    if err != nil {
        t.Fatal(err)
    }
    tokenFile.WriteString(strings.Join([]string{
        fmt.Sprintf(`%s,admin,uid1,"system:masters"`, tokenMaster),
        fmt.Sprintf(`%s,unknown,uid2,"system:nodes"`, tokenNodeUnknown),
        fmt.Sprintf(`%s,system:node:node1,uid3,"system:nodes"`, tokenNode1),
        fmt.Sprintf(`%s,system:node:node2,uid4,"system:nodes"`, tokenNode2),
    }, "\n"))
    tokenFile.Close()

    // Set up NodeRestriction admission
    nodeRestrictionAdmission := noderestriction.NewPlugin(nodeidentifier.NewDefaultNodeIdentifier())
    nodeRestrictionAdmission.SetInternalKubeInformerFactory(informerFactory)
    if err := nodeRestrictionAdmission.ValidateInitialization(); err != nil {
        t.Fatal(err)
    }
    server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{
        "--authorization-mode", "Node,RBAC",
        "--token-auth-file", tokenFile.Name(),
        "--enable-admission-plugins", "NodeRestriction",
        // The "default" SA is not installed, causing the ServiceAccount plugin to retry for ~1s per
        // API request.
        "--disable-admission-plugins", "ServiceAccount",
    }, framework.SharedEtcd())
    defer server.TearDownFn()

    // Start the server
    masterConfig := framework.NewIntegrationTestMasterConfig()
    masterConfig.GenericConfig.Authentication.Authenticator = authenticator
    masterConfig.GenericConfig.Authorization.Authorizer = nodeRBACAuthorizer
    masterConfig.GenericConfig.AdmissionControl = nodeRestrictionAdmission

    _, _, closeFn := framework.RunAMasterUsingServer(masterConfig, apiServer, h)
    defer closeFn()

    // Start the informers
    stopCh := make(chan struct{})
    defer close(stopCh)
    informerFactory.Start(stopCh)
    versionedInformerFactory.Start(stopCh)
    // Build client config and superuser clientset
    clientConfig := server.ClientConfig
    superuserClient, superuserClientExternal := clientsetForToken(tokenMaster, clientConfig)
    superuserCRDClient := crdClientsetForToken(tokenMaster, clientConfig)

    // Wait for a healthy server
    for {
@@ -129,6 +109,10 @@ func TestNodeAuthorizer(t *testing.T) {
    }

    // Create objects
    if _, err := superuserClient.Core().Namespaces().Create(&core.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns"}}); err != nil {
        t.Fatal(err)
    }

    if _, err := superuserClient.Core().Secrets("ns").Create(&api.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mysecret"}}); err != nil {
        t.Fatal(err)
    }
@@ -174,6 +158,13 @@ func TestNodeAuthorizer(t *testing.T) {
        t.Fatal(err)
    }

    csiNodeInfoCRD, err := crdFromManifest("../../../staging/src/k8s.io/csi-api/pkg/crd/manifests/csinodeinfo.yaml")
    if err != nil {
        t.Fatal(err)
    }

    etcd.CreateTestCRDs(t, superuserCRDClient, false, csiNodeInfoCRD)

    getSecret := func(client clientset.Interface) func() error {
        return func() error {
            _, err := client.Core().Secrets("ns").Get("mysecret", metav1.GetOptions{})
@@ -369,9 +360,132 @@ func TestNodeAuthorizer(t *testing.T) {
        }
    }

    getNode1Lease := func(client clientset.Interface) func() error {
        return func() error {
            _, err := client.Coordination().Leases(api.NamespaceNodeLease).Get("node1", metav1.GetOptions{})
            return err
        }
    }
    node1LeaseDurationSeconds := int32(40)
    createNode1Lease := func(client clientset.Interface) func() error {
        return func() error {
            lease := &coordination.Lease{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "node1",
                },
                Spec: coordination.LeaseSpec{
                    HolderIdentity:       pointer.StringPtr("node1"),
                    LeaseDurationSeconds: pointer.Int32Ptr(node1LeaseDurationSeconds),
                    RenewTime:            &metav1.MicroTime{Time: time.Now()},
                },
            }
            _, err := client.Coordination().Leases(api.NamespaceNodeLease).Create(lease)
            return err
        }
    }
    updateNode1Lease := func(client clientset.Interface) func() error {
        return func() error {
            lease, err := client.Coordination().Leases(api.NamespaceNodeLease).Get("node1", metav1.GetOptions{})
            if err != nil {
                return err
            }
            lease.Spec.RenewTime = &metav1.MicroTime{Time: time.Now()}
            _, err = client.Coordination().Leases(api.NamespaceNodeLease).Update(lease)
            return err
        }
    }
    patchNode1Lease := func(client clientset.Interface) func() error {
        return func() error {
            node1LeaseDurationSeconds++
            bs := []byte(fmt.Sprintf(`{"spec": {"leaseDurationSeconds": %d}}`, node1LeaseDurationSeconds))
            _, err := client.Coordination().Leases(api.NamespaceNodeLease).Patch("node1", types.StrategicMergePatchType, bs)
            return err
        }
    }
    deleteNode1Lease := func(client clientset.Interface) func() error {
        return func() error {
            return client.Coordination().Leases(api.NamespaceNodeLease).Delete("node1", &metav1.DeleteOptions{})
        }
    }

    getNode1CSINodeInfo := func(client csiclientset.Interface) func() error {
        return func() error {
            _, err := client.CsiV1alpha1().CSINodeInfos().Get("node1", metav1.GetOptions{})
            return err
        }
    }
    createNode1CSINodeInfo := func(client csiclientset.Interface) func() error {
        return func() error {
            nodeInfo := &csiv1alpha1.CSINodeInfo{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "node1",
                },
                Spec: csiv1alpha1.CSINodeInfoSpec{
                    Drivers: []csiv1alpha1.CSIDriverInfoSpec{
                        {
                            Name:         "com.example.csi/driver1",
                            NodeID:       "com.example.csi/node1",
                            TopologyKeys: []string{"com.example.csi/zone"},
                        },
                    },
                },
                Status: csiv1alpha1.CSINodeInfoStatus{
                    Drivers: []csiv1alpha1.CSIDriverInfoStatus{
                        {
                            Name:                  "com.example.csi/driver1",
                            Available:             true,
                            VolumePluginMechanism: csiv1alpha1.VolumePluginMechanismInTree,
                        },
                    },
                },
            }
            _, err := client.CsiV1alpha1().CSINodeInfos().Create(nodeInfo)
            return err
        }
    }
    updateNode1CSINodeInfo := func(client csiclientset.Interface) func() error {
        return func() error {
            nodeInfo, err := client.CsiV1alpha1().CSINodeInfos().Get("node1", metav1.GetOptions{})
            if err != nil {
                return err
            }
            nodeInfo.Spec.Drivers = []csiv1alpha1.CSIDriverInfoSpec{
                {
                    Name:         "com.example.csi/driver1",
                    NodeID:       "com.example.csi/node1",
                    TopologyKeys: []string{"com.example.csi/rack"},
                },
            }
            nodeInfo.Status.Drivers = []csiv1alpha1.CSIDriverInfoStatus{
                {
                    Name:                  "com.example.csi/driver1",
                    Available:             true,
                    VolumePluginMechanism: csiv1alpha1.VolumePluginMechanismInTree,
                },
            }
            _, err = client.CsiV1alpha1().CSINodeInfos().Update(nodeInfo)
            return err
        }
    }
    patchNode1CSINodeInfo := func(client csiclientset.Interface) func() error {
        return func() error {
            bs := []byte(fmt.Sprintf(`{"csiDrivers": [ { "driver": "net.example.storage/driver2", "nodeID": "net.example.storage/node1", "topologyKeys": [ "net.example.storage/region" ] } ] }`))
            // StrategicMergePatch is unsupported by CRs. Falling back to MergePatch
            _, err := client.CsiV1alpha1().CSINodeInfos().Patch("node1", types.MergePatchType, bs)
            return err
        }
    }
    deleteNode1CSINodeInfo := func(client csiclientset.Interface) func() error {
        return func() error {
            return client.CsiV1alpha1().CSINodeInfos().Delete("node1", &metav1.DeleteOptions{})
        }
    }

    nodeanonClient, _ := clientsetForToken(tokenNodeUnknown, clientConfig)
    node1Client, node1ClientExternal := clientsetForToken(tokenNode1, clientConfig)
    node2Client, node2ClientExternal := clientsetForToken(tokenNode2, clientConfig)
    csiNode1Client := csiClientsetForToken(tokenNode1, clientConfig)
    csiNode2Client := csiClientsetForToken(tokenNode2, clientConfig)

    // all node requests from node1 and unknown node fail
    expectForbidden(t, getSecret(nodeanonClient))
@@ -416,7 +530,10 @@ func TestNodeAuthorizer(t *testing.T) {
    expectAllowed(t, createNode2MirrorPodEviction(node2Client))
    expectAllowed(t, createNode2(node2Client))
    expectAllowed(t, updateNode2Status(node2Client))
    expectAllowed(t, deleteNode2(node2Client))
    // self deletion is not allowed
    expectForbidden(t, deleteNode2(node2Client))
    // clean up node2
    expectAllowed(t, deleteNode2(superuserClient))

    // create a pod as an admin to add object references
    expectAllowed(t, createNode2NormalPod(superuserClient))
@@ -507,9 +624,35 @@ func TestNodeAuthorizer(t *testing.T) {
    // node2 can no longer get the configmap after it is unassigned as its config source
    expectForbidden(t, getConfigMapConfigSource(node2Client))
    // clean up node2
    expectAllowed(t, deleteNode2(node2Client))
    expectAllowed(t, deleteNode2(superuserClient))

    //TODO(mikedanese): integration test node restriction of TokenRequest

    // node1 allowed to operate on its own lease
    expectAllowed(t, createNode1Lease(node1Client))
    expectAllowed(t, getNode1Lease(node1Client))
    expectAllowed(t, updateNode1Lease(node1Client))
    expectAllowed(t, patchNode1Lease(node1Client))
    expectAllowed(t, deleteNode1Lease(node1Client))
    // node2 not allowed to operate on another node's lease
    expectForbidden(t, createNode1Lease(node2Client))
    expectForbidden(t, getNode1Lease(node2Client))
    expectForbidden(t, updateNode1Lease(node2Client))
    expectForbidden(t, patchNode1Lease(node2Client))
    expectForbidden(t, deleteNode1Lease(node2Client))

    // node1 allowed to operate on its own CSINodeInfo
    expectAllowed(t, createNode1CSINodeInfo(csiNode1Client))
    expectAllowed(t, getNode1CSINodeInfo(csiNode1Client))
    expectAllowed(t, updateNode1CSINodeInfo(csiNode1Client))
    expectAllowed(t, patchNode1CSINodeInfo(csiNode1Client))
    expectAllowed(t, deleteNode1CSINodeInfo(csiNode1Client))
    // node2 not allowed to operate on another node's CSINodeInfo
    expectForbidden(t, createNode1CSINodeInfo(csiNode2Client))
    expectForbidden(t, getNode1CSINodeInfo(csiNode2Client))
    expectForbidden(t, updateNode1CSINodeInfo(csiNode2Client))
    expectForbidden(t, patchNode1CSINodeInfo(csiNode2Client))
    expectForbidden(t, deleteNode1CSINodeInfo(csiNode2Client))
}

// expect executes a function a set number of times until it either returns the
@@ -549,3 +692,17 @@ func expectAllowed(t *testing.T, f func() error) {
        t.Errorf("Expected no error, got %v", err)
    }
}

// crdFromManifest reads a .json/yaml file and returns the CRD in it.
func crdFromManifest(filename string) (*apiextensionsv1beta1.CustomResourceDefinition, error) {
    var crd apiextensionsv1beta1.CustomResourceDefinition
    data, err := ioutil.ReadFile(filename)
    if err != nil {
        return nil, err
    }

    if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), data, &crd); err != nil {
        return nil, err
    }
    return &crd, nil
}
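
The lease helpers added to node_test.go drive the NodeLease heartbeat objects introduced behind the feature gate above. A hedged sketch of the same create operation against the external client-go coordination v1beta1 API (the test uses the internal clientset instead); the lease name, duration, and the literal kube-node-lease namespace string are assumptions for illustration.

package leaseexample

import (
    "time"

    coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/utils/pointer"
)

// createNodeLease writes the heartbeat Lease a kubelet would maintain for
// nodeName; "kube-node-lease" is the dedicated namespace for node leases.
func createNodeLease(c kubernetes.Interface, nodeName string) error {
    lease := &coordinationv1beta1.Lease{
        ObjectMeta: metav1.ObjectMeta{Name: nodeName},
        Spec: coordinationv1beta1.LeaseSpec{
            HolderIdentity:       pointer.StringPtr(nodeName),
            LeaseDurationSeconds: pointer.Int32Ptr(40),
            RenewTime:            &metav1.MicroTime{Time: time.Now()},
        },
    }
    _, err := c.CoordinationV1beta1().Leases("kube-node-lease").Create(lease)
    return err
}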
78 vendor/k8s.io/kubernetes/test/integration/auth/rbac_test.go generated vendored
@@ -17,6 +17,7 @@ limitations under the License.
package auth

import (
    "context"
    "fmt"
    "io"
    "io/ioutil"
@@ -26,8 +27,9 @@ import (
    "testing"
    "time"

    "github.com/golang/glog"
    "k8s.io/klog"

    apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/watch"
@@ -38,7 +40,9 @@ import (
    "k8s.io/apiserver/pkg/registry/generic"
    externalclientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    watchtools "k8s.io/client-go/tools/watch"
    "k8s.io/client-go/transport"
    csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/pkg/api/testapi"
    api "k8s.io/kubernetes/pkg/apis/core"
@@ -72,6 +76,19 @@ func clientsetForToken(user string, config *restclient.Config) (clientset.Interf
    return clientset.NewForConfigOrDie(&configCopy), externalclientset.NewForConfigOrDie(&configCopy)
}

func crdClientsetForToken(user string, config *restclient.Config) apiextensionsclient.Interface {
    configCopy := *config
    configCopy.BearerToken = user
    return apiextensionsclient.NewForConfigOrDie(&configCopy)
}

func csiClientsetForToken(user string, config *restclient.Config) csiclientset.Interface {
    configCopy := *config
    configCopy.BearerToken = user
    configCopy.ContentType = "application/json" // csi client works with CRDs that support json only
    return csiclientset.NewForConfigOrDie(&configCopy)
}

type testRESTOptionsGetter struct {
    config *master.Config
}
@@ -219,6 +236,15 @@ var (
    }
  }
}
`
    aLimitRange = `
{
  "apiVersion": "v1",
  "kind": "LimitRange",
  "metadata": {
    "name": "a"%s
  }
}
`
    podNamespace = `
{
@@ -246,6 +272,15 @@
    "name": "forbidden-namespace"%s
  }
}
`
    limitRangeNamespace = `
{
  "apiVersion": "` + testapi.Groups[api.GroupName].GroupVersion().String() + `",
  "kind": "Namespace",
  "metadata": {
    "name": "limitrange-namespace"%s
  }
}
`
)

@@ -409,6 +444,40 @@ func TestRBAC(t *testing.T) {
            {superUser, "DELETE", "rbac.authorization.k8s.io", "rolebindings", "job-namespace", "pi", "", http.StatusOK},
        },
    },
    {
        bootstrapRoles: bootstrapRoles{
            clusterRoles: []rbacapi.ClusterRole{
                {
                    ObjectMeta: metav1.ObjectMeta{Name: "allow-all"},
                    Rules:      []rbacapi.PolicyRule{ruleAllowAll},
                },
                {
                    ObjectMeta: metav1.ObjectMeta{Name: "update-limitranges"},
                    Rules: []rbacapi.PolicyRule{
                        rbacapi.NewRule("update").Groups("").Resources("limitranges").RuleOrDie(),
                    },
                },
            },
            clusterRoleBindings: []rbacapi.ClusterRoleBinding{
                {
                    ObjectMeta: metav1.ObjectMeta{Name: "update-limitranges"},
                    Subjects: []rbacapi.Subject{
                        {Kind: "User", Name: "limitrange-updater"},
                    },
                    RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "update-limitranges"},
                },
            },
        },
        requests: []request{
            // Create the namespace used later in the test
            {superUser, "POST", "", "namespaces", "", "", limitRangeNamespace, http.StatusCreated},

            {"limitrange-updater", "PUT", "", "limitranges", "limitrange-namespace", "a", aLimitRange, http.StatusForbidden},
            {superUser, "PUT", "", "limitranges", "limitrange-namespace", "a", aLimitRange, http.StatusCreated},
            {superUser, "PUT", "", "limitranges", "limitrange-namespace", "a", aLimitRange, http.StatusOK},
            {"limitrange-updater", "PUT", "", "limitranges", "limitrange-namespace", "a", aLimitRange, http.StatusOK},
        },
    },
}

for i, tc := range tests {
@@ -424,6 +493,7 @@ func TestRBAC(t *testing.T) {
        "job-writer-namespace":             {Name: "job-writer-namespace"},
        "nonescalating-rolebinding-writer": {Name: "nonescalating-rolebinding-writer"},
        "pod-reader":                       {Name: "pod-reader"},
        "limitrange-updater":               {Name: "limitrange-updater"},
        "user-with-no-permissions":         {Name: "user-with-no-permissions"},
    }))
    _, s, closeFn := framework.RunAMaster(masterConfig)
@@ -492,7 +562,7 @@ func TestRBAC(t *testing.T) {
    //
    // go test -v -tags integration -run RBAC -args -v 10
    //
    glog.V(8).Infof("case %d, req %d: %s\n%s\n", i, j, reqDump, respDump)
    klog.V(8).Infof("case %d, req %d: %s\n%s\n", i, j, reqDump, respDump)
    t.Errorf("case %d, req %d: %s expected %q got %q", i, j, r, statusCode(r.expectedStatus), statusCode(resp.StatusCode))
}

@@ -530,7 +600,9 @@ func TestBootstrapping(t *testing.T) {
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    _, err = watch.Until(30*time.Second, watcher, func(event watch.Event) (bool, error) {
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()
    _, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
        if event.Type != watch.Added {
            return false, nil
        }
143
vendor/k8s.io/kubernetes/test/integration/auth/svcaccttoken_test.go
generated
vendored
@ -20,22 +20,25 @@ import (
"crypto/ecdsa"
"encoding/base64"
"encoding/json"
"fmt"
"reflect"
"strings"
"testing"
"time"

"gopkg.in/square/go-jose.v2/jwt"

authenticationv1 "k8s.io/api/authentication/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/apiserver/pkg/authentication/request/bearertoken"
apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/apiserver/pkg/authorization/authorizerfactory"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
clientset "k8s.io/client-go/kubernetes"
externalclientset "k8s.io/client-go/kubernetes"
certutil "k8s.io/client-go/util/cert"
"k8s.io/kubernetes/pkg/apis/core"
serviceaccountgetter "k8s.io/kubernetes/pkg/controller/serviceaccount"
@ -61,22 +64,35 @@ func TestServiceAccountTokenCreate(t *testing.T) {
pk := sk.(*ecdsa.PrivateKey).PublicKey

const iss = "https://foo.bar.example.com"
aud := []string{"api"}
aud := authenticator.Audiences{"api"}

maxExpirationSeconds := int64(60 * 60)
maxExpirationDuration, err := time.ParseDuration(fmt.Sprintf("%ds", maxExpirationSeconds))
if err != nil {
t.Fatalf("err: %v", err)
}

gcs := &clientset.Clientset{}

// Start the server
masterConfig := framework.NewIntegrationTestMasterConfig()
masterConfig.GenericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer()
masterConfig.GenericConfig.Authentication.APIAudiences = aud
masterConfig.GenericConfig.Authentication.Authenticator = bearertoken.New(
serviceaccount.JWTTokenAuthenticator(
iss,
[]interface{}{&pk},
serviceaccount.NewValidator(aud, serviceaccountgetter.NewGetterFromClient(gcs)),
aud,
serviceaccount.NewValidator(serviceaccountgetter.NewGetterFromClient(gcs)),
),
)
masterConfig.ExtraConfig.ServiceAccountIssuer = serviceaccount.JWTTokenGenerator(iss, sk)
masterConfig.ExtraConfig.ServiceAccountAPIAudiences = aud
tokenGenerator, err := serviceaccount.JWTTokenGenerator(iss, sk)
if err != nil {
t.Fatalf("err: %v", err)
}
masterConfig.ExtraConfig.ServiceAccountIssuer = tokenGenerator
masterConfig.ExtraConfig.ServiceAccountMaxExpiration = maxExpirationDuration
masterConfig.GenericConfig.Authentication.APIAudiences = aud

master, _, closeFn := framework.RunAMaster(masterConfig)
defer closeFn()
@ -150,7 +166,10 @@ func TestServiceAccountTokenCreate(t *testing.T) {
checkPayload(t, treq.Status.Token, `"myns"`, "kubernetes.io", "namespace")
checkPayload(t, treq.Status.Token, `"test-svcacct"`, "kubernetes.io", "serviceaccount", "name")

doTokenReview(t, cs, treq, false)
info := doTokenReview(t, cs, treq, false)
if info.Extra != nil {
t.Fatalf("expected Extra to be nil but got: %#v", info.Extra)
}
delSvcAcct()
doTokenReview(t, cs, treq, true)
})
@ -203,7 +222,16 @@ func TestServiceAccountTokenCreate(t *testing.T) {
checkPayload(t, treq.Status.Token, `"myns"`, "kubernetes.io", "namespace")
checkPayload(t, treq.Status.Token, `"test-svcacct"`, "kubernetes.io", "serviceaccount", "name")

doTokenReview(t, cs, treq, false)
info := doTokenReview(t, cs, treq, false)
if len(info.Extra) != 2 {
t.Fatalf("expected Extra to have length of 2 but was length %d: %#v", len(info.Extra), info.Extra)
}
if expected := map[string]authenticationv1.ExtraValue{
"authentication.kubernetes.io/pod-name": {pod.ObjectMeta.Name},
"authentication.kubernetes.io/pod-uid": {string(pod.ObjectMeta.UID)},
}; !reflect.DeepEqual(info.Extra, expected) {
t.Fatalf("unexpected Extra:\ngot:\t%#v\nwant:\t%#v", info.Extra, expected)
}
delPod()
doTokenReview(t, cs, treq, true)
})
@ -438,9 +466,97 @@ func TestServiceAccountTokenCreate(t *testing.T) {

doTokenReview(t, cs, treq, true)
})

t.Run("a token request within expiration time", func(t *testing.T) {
normalExpirationTime := maxExpirationSeconds - 10*60
treq := &authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
Audiences: []string{"api"},
ExpirationSeconds: &normalExpirationTime,
BoundObjectRef: &authenticationv1.BoundObjectReference{
Kind: "Secret",
APIVersion: "v1",
Name: secret.Name,
UID: secret.UID,
},
},
}

sa, del := createDeleteSvcAcct(t, cs, sa)
defer del()

originalSecret, originalDelSecret := createDeleteSecret(t, cs, secret)
defer originalDelSecret()

treq.Spec.BoundObjectRef.UID = originalSecret.UID
if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err != nil {
t.Fatalf("err: %v", err)
}

checkPayload(t, treq.Status.Token, `"system:serviceaccount:myns:test-svcacct"`, "sub")
checkPayload(t, treq.Status.Token, `["api"]`, "aud")
checkPayload(t, treq.Status.Token, `null`, "kubernetes.io", "pod")
checkPayload(t, treq.Status.Token, `"test-secret"`, "kubernetes.io", "secret", "name")
checkPayload(t, treq.Status.Token, `"myns"`, "kubernetes.io", "namespace")
checkPayload(t, treq.Status.Token, `"test-svcacct"`, "kubernetes.io", "serviceaccount", "name")
checkExpiration(t, treq, normalExpirationTime)

doTokenReview(t, cs, treq, false)
originalDelSecret()
doTokenReview(t, cs, treq, true)

_, recreateDelSecret := createDeleteSecret(t, cs, secret)
defer recreateDelSecret()

doTokenReview(t, cs, treq, true)
})

t.Run("a token request with out-of-range expiration", func(t *testing.T) {
tooLongExpirationTime := maxExpirationSeconds + 10*60
treq := &authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
Audiences: []string{"api"},
ExpirationSeconds: &tooLongExpirationTime,
BoundObjectRef: &authenticationv1.BoundObjectReference{
Kind: "Secret",
APIVersion: "v1",
Name: secret.Name,
UID: secret.UID,
},
},
}

sa, del := createDeleteSvcAcct(t, cs, sa)
defer del()

originalSecret, originalDelSecret := createDeleteSecret(t, cs, secret)
defer originalDelSecret()

treq.Spec.BoundObjectRef.UID = originalSecret.UID
if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err != nil {
t.Fatalf("err: %v", err)
}

checkPayload(t, treq.Status.Token, `"system:serviceaccount:myns:test-svcacct"`, "sub")
checkPayload(t, treq.Status.Token, `["api"]`, "aud")
checkPayload(t, treq.Status.Token, `null`, "kubernetes.io", "pod")
checkPayload(t, treq.Status.Token, `"test-secret"`, "kubernetes.io", "secret", "name")
checkPayload(t, treq.Status.Token, `"myns"`, "kubernetes.io", "namespace")
checkPayload(t, treq.Status.Token, `"test-svcacct"`, "kubernetes.io", "serviceaccount", "name")
checkExpiration(t, treq, maxExpirationSeconds)

doTokenReview(t, cs, treq, false)
originalDelSecret()
doTokenReview(t, cs, treq, true)

_, recreateDelSecret := createDeleteSecret(t, cs, secret)
defer recreateDelSecret()

doTokenReview(t, cs, treq, true)
})
}

func doTokenReview(t *testing.T, cs externalclientset.Interface, treq *authenticationv1.TokenRequest, expectErr bool) {
func doTokenReview(t *testing.T, cs clientset.Interface, treq *authenticationv1.TokenRequest, expectErr bool) authenticationv1.UserInfo {
t.Helper()
trev, err := cs.AuthenticationV1().TokenReviews().Create(&authenticationv1.TokenReview{
Spec: authenticationv1.TokenReviewSpec{
@ -460,6 +576,7 @@ func doTokenReview(t *testing.T, cs externalclientset.Interface, treq *authentic
if !trev.Status.Authenticated && !expectErr {
t.Fatal("expected token to be authenticated but it wasn't")
}
return trev.Status.User
}

func checkPayload(t *testing.T, tok string, want string, parts ...string) {
@ -470,6 +587,16 @@ func checkPayload(t *testing.T, tok string, want string, parts ...string) {
}
}

func checkExpiration(t *testing.T, treq *authenticationv1.TokenRequest, expectedExpiration int64) {
t.Helper()
if treq.Spec.ExpirationSeconds == nil {
t.Fatalf("unexpected nil expiration seconds")
}
if *treq.Spec.ExpirationSeconds != expectedExpiration {
t.Errorf("unexpected expiration seconds.\nsaw:\t%d\nwant:\t%d", *treq.Spec.ExpirationSeconds, expectedExpiration)
}
}

func getSubObject(t *testing.T, b string, parts ...string) string {
t.Helper()
var obj interface{}

28
vendor/k8s.io/kubernetes/test/integration/client/BUILD
generated
vendored
@ -15,25 +15,25 @@ go_test(
],
tags = ["integration"],
deps = [
"//cmd/kube-apiserver/app/testing:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/testapi:go_default_library",
"//pkg/version:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/integration/framework:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)

135
vendor/k8s.io/kubernetes/test/integration/client/client_test.go
generated
vendored
@ -36,7 +36,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/test/integration/framework"
@ -44,13 +44,10 @@ import (
)

func TestClient(t *testing.T) {
_, s, closeFn := framework.RunAMaster(nil)
defer closeFn()
result := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins", "ServiceAccount"}, framework.SharedEtcd())
defer result.TearDownFn()

client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

ns := framework.CreateTestingNamespace("client", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
client := clientset.NewForConfigOrDie(result.ClientConfig)

info, err := client.Discovery().ServerVersion()
if err != nil {
@ -60,7 +57,7 @@ func TestClient(t *testing.T) {
t.Errorf("expected %#v, got %#v", e, a)
}

pods, err := client.Core().Pods(ns.Name).List(metav1.ListOptions{})
pods, err := client.CoreV1().Pods("default").List(metav1.ListOptions{})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@ -72,7 +69,7 @@ func TestClient(t *testing.T) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "test",
Namespace: ns.Name,
Namespace: "default",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
@ -83,14 +80,14 @@ func TestClient(t *testing.T) {
},
}

got, err := client.Core().Pods(ns.Name).Create(pod)
got, err := client.CoreV1().Pods("default").Create(pod)
if err == nil {
t.Fatalf("unexpected non-error: %v", got)
}

// get a created pod
pod.Spec.Containers[0].Image = "an-image"
got, err = client.Core().Pods(ns.Name).Create(pod)
got, err = client.CoreV1().Pods("default").Create(pod)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@ -99,7 +96,7 @@ func TestClient(t *testing.T) {
}

// pod is shown, but not scheduled
pods, err = client.Core().Pods(ns.Name).List(metav1.ListOptions{})
pods, err = client.CoreV1().Pods("default").List(metav1.ListOptions{})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@ -116,21 +113,18 @@ func TestClient(t *testing.T) {
}

func TestAtomicPut(t *testing.T) {
_, s, closeFn := framework.RunAMaster(nil)
defer closeFn()
result := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins", "ServiceAccount"}, framework.SharedEtcd())
defer result.TearDownFn()

c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

ns := framework.CreateTestingNamespace("atomic-put", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
c := clientset.NewForConfigOrDie(result.ClientConfig)

rcBody := v1.ReplicationController{
TypeMeta: metav1.TypeMeta{
APIVersion: c.Core().RESTClient().APIVersion().String(),
APIVersion: c.CoreV1().RESTClient().APIVersion().String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "atomicrc",
Namespace: ns.Name,
Namespace: "default",
Labels: map[string]string{
"name": "atomicrc",
},
@ -154,7 +148,7 @@ func TestAtomicPut(t *testing.T) {
},
},
}
rcs := c.Core().ReplicationControllers(ns.Name)
rcs := c.CoreV1().ReplicationControllers("default")
rc, err := rcs.Create(&rcBody)
if err != nil {
t.Fatalf("Failed creating atomicRC: %v", err)
@ -208,23 +202,20 @@ func TestAtomicPut(t *testing.T) {
}

func TestPatch(t *testing.T) {
_, s, closeFn := framework.RunAMaster(nil)
defer closeFn()
result := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins", "ServiceAccount"}, framework.SharedEtcd())
defer result.TearDownFn()

c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

ns := framework.CreateTestingNamespace("patch", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
c := clientset.NewForConfigOrDie(result.ClientConfig)

name := "patchpod"
resource := "pods"
podBody := v1.Pod{
TypeMeta: metav1.TypeMeta{
APIVersion: c.Core().RESTClient().APIVersion().String(),
APIVersion: c.CoreV1().RESTClient().APIVersion().String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ns.Name,
Namespace: "default",
Labels: map[string]string{},
},
Spec: v1.PodSpec{
@ -233,7 +224,7 @@ func TestPatch(t *testing.T) {
},
},
}
pods := c.Core().Pods(ns.Name)
pods := c.CoreV1().Pods("default")
pod, err := pods.Create(&podBody)
if err != nil {
t.Fatalf("Failed creating patchpods: %v", err)
@ -263,12 +254,12 @@ func TestPatch(t *testing.T) {
},
}

pb := patchBodies[c.Core().RESTClient().APIVersion()]
pb := patchBodies[c.CoreV1().RESTClient().APIVersion()]

execPatch := func(pt types.PatchType, body []byte) error {
result := c.Core().RESTClient().Patch(pt).
result := c.CoreV1().RESTClient().Patch(pt).
Resource(resource).
Namespace(ns.Name).
Namespace("default").
Name(name).
Body(body).
Do()
@ -330,18 +321,15 @@ func TestPatch(t *testing.T) {
}

func TestPatchWithCreateOnUpdate(t *testing.T) {
_, s, closeFn := framework.RunAMaster(nil)
defer closeFn()
result := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
defer result.TearDownFn()

c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

ns := framework.CreateTestingNamespace("patch-with-create", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
c := clientset.NewForConfigOrDie(result.ClientConfig)

endpointTemplate := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: "patchendpoint",
Namespace: ns.Name,
Namespace: "default",
},
Subsets: []v1.EndpointSubset{
{
@ -352,7 +340,7 @@ func TestPatchWithCreateOnUpdate(t *testing.T) {
}

patchEndpoint := func(json []byte) (runtime.Object, error) {
return c.Core().RESTClient().Patch(types.MergePatchType).Resource("endpoints").Namespace(ns.Name).Name("patchendpoint").Body(json).Do().Get()
return c.CoreV1().RESTClient().Patch(types.MergePatchType).Resource("endpoints").Namespace("default").Name("patchendpoint").Body(json).Do().Get()
}

// Make sure patch doesn't get to CreateOnUpdate
@ -367,7 +355,7 @@ func TestPatchWithCreateOnUpdate(t *testing.T) {
}

// Create the endpoint (endpoints set AllowCreateOnUpdate=true) to get a UID and resource version
createdEndpoint, err := c.Core().Endpoints(ns.Name).Update(endpointTemplate)
createdEndpoint, err := c.CoreV1().Endpoints("default").Update(endpointTemplate)
if err != nil {
t.Fatalf("Failed creating endpoint: %v", err)
}
@ -441,12 +429,12 @@ func TestPatchWithCreateOnUpdate(t *testing.T) {
}

func TestAPIVersions(t *testing.T) {
_, s, closeFn := framework.RunAMaster(nil)
defer closeFn()
result := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
defer result.TearDownFn()

c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
c := clientset.NewForConfigOrDie(result.ClientConfig)

clientVersion := c.Core().RESTClient().APIVersion().String()
clientVersion := c.CoreV1().RESTClient().APIVersion().String()
g, err := c.Discovery().ServerGroups()
if err != nil {
t.Fatalf("Failed to get api versions: %v", err)
@ -463,23 +451,20 @@ func TestAPIVersions(t *testing.T) {
}

func TestSingleWatch(t *testing.T) {
_, s, closeFn := framework.RunAMaster(nil)
defer closeFn()
result := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
defer result.TearDownFn()

ns := framework.CreateTestingNamespace("single-watch", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)

client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
client := clientset.NewForConfigOrDie(result.ClientConfig)

mkEvent := func(i int) *v1.Event {
name := fmt.Sprintf("event-%v", i)
return &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns.Name,
Namespace: "default",
Name: name,
},
InvolvedObject: v1.ObjectReference{
Namespace: ns.Name,
Namespace: "default",
Name: name,
},
Reason: fmt.Sprintf("event %v", i),
@ -489,7 +474,7 @@ func TestSingleWatch(t *testing.T) {
rv1 := ""
for i := 0; i < 10; i++ {
event := mkEvent(i)
got, err := client.Core().Events(ns.Name).Create(event)
got, err := client.CoreV1().Events("default").Create(event)
if err != nil {
t.Fatalf("Failed creating event %#q: %v", event, err)
}
@ -502,8 +487,8 @@ func TestSingleWatch(t *testing.T) {
t.Logf("Created event %#v", got.ObjectMeta)
}

w, err := client.Core().RESTClient().Get().
Namespace(ns.Name).
w, err := client.CoreV1().RESTClient().Get().
Namespace("default").
Resource("events").
VersionedParams(&metav1.ListOptions{
ResourceVersion: rv1,
@ -550,24 +535,21 @@ func TestMultiWatch(t *testing.T) {
const watcherCount = 50
rt.GOMAXPROCS(watcherCount)

_, s, closeFn := framework.RunAMaster(nil)
defer closeFn()
result := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
defer result.TearDownFn()

ns := framework.CreateTestingNamespace("multi-watch", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)

client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
client := clientset.NewForConfigOrDie(result.ClientConfig)

dummyEvent := func(i int) *v1.Event {
name := fmt.Sprintf("unrelated-%v", i)
return &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%v.%x", name, time.Now().UnixNano()),
Namespace: ns.Name,
Namespace: "default",
},
InvolvedObject: v1.ObjectReference{
Name: name,
Namespace: ns.Name,
Namespace: "default",
},
Reason: fmt.Sprintf("unrelated change %v", i),
}
@ -585,7 +567,7 @@ func TestMultiWatch(t *testing.T) {
for i := 0; i < watcherCount; i++ {
watchesStarted.Add(1)
name := fmt.Sprintf("multi-watch-%v", i)
got, err := client.Core().Pods(ns.Name).Create(&v1.Pod{
got, err := client.CoreV1().Pods("default").Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels.Set{"watchlabel": name},
@ -606,7 +588,7 @@ func TestMultiWatch(t *testing.T) {
LabelSelector: labels.Set{"watchlabel": name}.AsSelector().String(),
ResourceVersion: rv,
}
w, err := client.Core().Pods(ns.Name).Watch(options)
w, err := client.CoreV1().Pods("default").Watch(options)
if err != nil {
panic(fmt.Sprintf("watch error for %v: %v", name, err))
}
@ -655,7 +637,7 @@ func TestMultiWatch(t *testing.T) {
if !ok {
return
}
if _, err := client.Core().Events(ns.Name).Create(dummyEvent(i)); err != nil {
if _, err := client.CoreV1().Events("default").Create(dummyEvent(i)); err != nil {
panic(fmt.Sprintf("couldn't make an event: %v", err))
}
changeMade <- i
@ -692,7 +674,7 @@ func TestMultiWatch(t *testing.T) {
return
}
name := fmt.Sprintf("unrelated-%v", i)
_, err := client.Core().Pods(ns.Name).Create(&v1.Pod{
_, err := client.CoreV1().Pods("default").Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
@ -726,13 +708,13 @@ func TestMultiWatch(t *testing.T) {
for i := 0; i < watcherCount; i++ {
go func(i int) {
name := fmt.Sprintf("multi-watch-%v", i)
pod, err := client.Core().Pods(ns.Name).Get(name, metav1.GetOptions{})
pod, err := client.CoreV1().Pods("default").Get(name, metav1.GetOptions{})
if err != nil {
panic(fmt.Sprintf("Couldn't get %v: %v", name, err))
}
pod.Spec.Containers[0].Image = imageutils.GetPauseImageName()
sentTimes <- timePair{time.Now(), name}
if _, err := client.Core().Pods(ns.Name).Update(pod); err != nil {
if _, err := client.CoreV1().Pods("default").Update(pod); err != nil {
panic(fmt.Sprintf("Couldn't make %v: %v", name, err))
}
}(i)
@ -806,13 +788,10 @@ func runSelfLinkTestOnNamespace(t *testing.T, c clientset.Interface, namespace s
}

func TestSelfLinkOnNamespace(t *testing.T) {
_, s, closeFn := framework.RunAMaster(nil)
defer closeFn()
result := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins", "ServiceAccount"}, framework.SharedEtcd())
defer result.TearDownFn()

ns := framework.CreateTestingNamespace("selflink", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
c := clientset.NewForConfigOrDie(result.ClientConfig)

c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

runSelfLinkTestOnNamespace(t, c, ns.Name)
runSelfLinkTestOnNamespace(t, c, "default")
}

127
vendor/k8s.io/kubernetes/test/integration/client/dynamic_client_test.go
generated
vendored
@ -17,36 +17,32 @@ limitations under the License.
package client

import (
"fmt"
"reflect"
"testing"
"time"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/test/integration/framework"
)

func TestDynamicClient(t *testing.T) {
_, s, closeFn := framework.RunAMaster(nil)
defer closeFn()
result := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins", "ServiceAccount"}, framework.SharedEtcd())
defer result.TearDownFn()

ns := framework.CreateTestingNamespace("dynamic-client", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)

gv := &schema.GroupVersion{Group: "", Version: "v1"}
config := &restclient.Config{
Host: s.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: gv},
}

client := clientset.NewForConfigOrDie(config)
dynamicClient, err := dynamic.NewForConfig(config)
client := clientset.NewForConfigOrDie(result.ClientConfig)
dynamicClient, err := dynamic.NewForConfig(result.ClientConfig)
if err != nil {
t.Fatalf("unexpected error creating dynamic client: %v", err)
}
@ -68,13 +64,13 @@ func TestDynamicClient(t *testing.T) {
},
}

actual, err := client.Core().Pods(ns.Name).Create(pod)
actual, err := client.CoreV1().Pods("default").Create(pod)
if err != nil {
t.Fatalf("unexpected error when creating pod: %v", err)
}

// check dynamic list
unstructuredList, err := dynamicClient.Resource(resource).Namespace(ns.Name).List(metav1.ListOptions{})
unstructuredList, err := dynamicClient.Resource(resource).Namespace("default").List(metav1.ListOptions{})
if err != nil {
t.Fatalf("unexpected error when listing pods: %v", err)
}
@ -93,7 +89,7 @@ func TestDynamicClient(t *testing.T) {
}

// check dynamic get
unstruct, err := dynamicClient.Resource(resource).Namespace(ns.Name).Get(actual.Name, metav1.GetOptions{})
unstruct, err := dynamicClient.Resource(resource).Namespace("default").Get(actual.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("unexpected error when getting pod %q: %v", actual.Name, err)
}
@ -108,12 +104,12 @@ func TestDynamicClient(t *testing.T) {
}

// delete the pod dynamically
err = dynamicClient.Resource(resource).Namespace(ns.Name).Delete(actual.Name, nil)
err = dynamicClient.Resource(resource).Namespace("default").Delete(actual.Name, nil)
if err != nil {
t.Fatalf("unexpected error when deleting pod: %v", err)
}

list, err := client.Core().Pods(ns.Name).List(metav1.ListOptions{})
list, err := client.CoreV1().Pods("default").List(metav1.ListOptions{})
if err != nil {
t.Fatalf("unexpected error when listing pods: %v", err)
}
@ -123,6 +119,89 @@ func TestDynamicClient(t *testing.T) {
}
}

func TestDynamicClientWatch(t *testing.T) {
result := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
defer result.TearDownFn()

client := clientset.NewForConfigOrDie(result.ClientConfig)
dynamicClient, err := dynamic.NewForConfig(result.ClientConfig)
if err != nil {
t.Fatalf("unexpected error creating dynamic client: %v", err)
}

resource := v1.SchemeGroupVersion.WithResource("events")

mkEvent := func(i int) *v1.Event {
name := fmt.Sprintf("event-%v", i)
return &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: name,
},
InvolvedObject: v1.ObjectReference{
Namespace: "default",
Name: name,
},
Reason: fmt.Sprintf("event %v", i),
}
}

rv1 := ""
for i := 0; i < 10; i++ {
event := mkEvent(i)
got, err := client.CoreV1().Events("default").Create(event)
if err != nil {
t.Fatalf("Failed creating event %#q: %v", event, err)
}
if rv1 == "" {
rv1 = got.ResourceVersion
if rv1 == "" {
t.Fatal("did not get a resource version.")
}
}
t.Logf("Created event %#v", got.ObjectMeta)
}

w, err := dynamicClient.Resource(resource).Namespace("default").Watch(metav1.ListOptions{
ResourceVersion: rv1,
Watch: true,
FieldSelector: fields.OneTermEqualSelector("metadata.name", "event-9").String(),
})

if err != nil {
t.Fatalf("Failed watch: %v", err)
}
defer w.Stop()

select {
case <-time.After(wait.ForeverTestTimeout):
t.Fatalf("watch took longer than %s", wait.ForeverTestTimeout.String())
case got, ok := <-w.ResultChan():
if !ok {
t.Fatal("Watch channel closed unexpectedly.")
}

// We expect to see an ADD of event-9 and only event-9. (This
// catches a bug where all the events would have been sent down
// the channel.)
if e, a := watch.Added, got.Type; e != a {
t.Errorf("Wanted %v, got %v", e, a)
}

unstructured, ok := got.Object.(*unstructured.Unstructured)
if !ok {
t.Fatalf("Unexpected watch event containing object %#q", got.Object)
}
event, err := unstructuredToEvent(unstructured)
if err != nil {
t.Fatalf("unexpected error converting Unstructured to v1.Event: %v", err)
}
if e, a := "event-9", event.Name; e != a {
t.Errorf("Wanted %v, got %v", e, a)
}
}
}

func unstructuredToPod(obj *unstructured.Unstructured) (*v1.Pod, error) {
json, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
if err != nil {
@ -134,3 +213,13 @@ func unstructuredToPod(obj *unstructured.Unstructured) (*v1.Pod, error) {
pod.APIVersion = ""
return pod, err
}

func unstructuredToEvent(obj *unstructured.Unstructured) (*v1.Event, error) {
json, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
if err != nil {
return nil, err
}
event := new(v1.Event)
err = runtime.DecodeInto(testapi.Default.Codec(), json, event)
return event, err
}

10
vendor/k8s.io/kubernetes/test/integration/configmap/BUILD
generated
vendored
@ -14,13 +14,13 @@ go_test(
],
tags = ["integration"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//test/integration:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)

43
vendor/k8s.io/kubernetes/test/integration/cronjob/BUILD
generated
vendored
Normal file
@ -0,0 +1,43 @@
package(default_visibility = ["//visibility:public"])

load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
)

go_test(
name = "go_default_test",
size = "large",
srcs = [
"cronjob_test.go",
"main_test.go",
],
tags = ["integration"],
deps = [
"//pkg/controller/cronjob:go_default_library",
"//pkg/controller/job:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//test/integration/framework:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
174
vendor/k8s.io/kubernetes/test/integration/cronjob/cronjob_test.go
generated
vendored
Normal file
@ -0,0 +1,174 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cronjob

import (
"fmt"
"net/http/httptest"
"testing"
"time"

batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
clientset "k8s.io/client-go/kubernetes"
clientbatchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1"
"k8s.io/client-go/rest"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/controller/cronjob"
"k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/test/integration/framework"
)

func setup(t *testing.T) (*httptest.Server, framework.CloseFunc, *cronjob.CronJobController, *job.JobController, informers.SharedInformerFactory, clientset.Interface, rest.Config) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, server, closeFn := framework.RunAMaster(masterConfig)

config := restclient.Config{Host: server.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("Error creating clientset: %v", err)
}
resyncPeriod := 12 * time.Hour
informerSet := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "cronjob-informers")), resyncPeriod)
cjc, err := cronjob.NewCronJobController(clientSet)
if err != nil {
t.Fatalf("Error creating CronJob controller: %v", err)
}
jc := job.NewJobController(informerSet.Core().V1().Pods(), informerSet.Batch().V1().Jobs(), clientSet)

return server, closeFn, cjc, jc, informerSet, clientSet, config
}

func newCronJob(name, namespace, schedule string) *batchv1beta1.CronJob {
zero64 := int64(0)
zero32 := int32(0)
return &batchv1beta1.CronJob{
TypeMeta: metav1.TypeMeta{
Kind: "CronJob",
APIVersion: "batch/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Spec: batchv1beta1.CronJobSpec{
Schedule: schedule,
SuccessfulJobsHistoryLimit: &zero32,
JobTemplate: batchv1beta1.JobTemplateSpec{
Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{{Name: "foo", Image: "bar"}},
TerminationGracePeriodSeconds: &zero64,
RestartPolicy: "Never",
},
},
},
},
},
}
}

func cleanupCronJobs(t *testing.T, cjClient clientbatchv1beta1.CronJobInterface, name string) {
deletePropagation := metav1.DeletePropagationForeground
err := cjClient.Delete(name, &metav1.DeleteOptions{PropagationPolicy: &deletePropagation})
if err != nil {
t.Errorf("Failed to delete CronJob: %v", err)
}
}

func validateJobAndPod(t *testing.T, clientSet kubernetes.Interface, namespace string) {
if err := wait.PollImmediate(1*time.Second, 120*time.Second, func() (bool, error) {
jobs, err := clientSet.BatchV1().Jobs(namespace).List(metav1.ListOptions{})
if err != nil {
t.Fatalf("Failed to list jobs: %v", err)
}

if len(jobs.Items) == 0 {
return false, nil
}

for _, j := range jobs.Items {
ownerReferences := j.ObjectMeta.OwnerReferences
if refCount := len(ownerReferences); refCount != 1 {
return false, fmt.Errorf("job %s has %d OwnerReferences, expected only 1", j.Name, refCount)
}

reference := ownerReferences[0]
if reference.Kind != "CronJob" {
return false, fmt.Errorf("job %s has OwnerReference with Kind %s, expected CronJob", j.Name, reference.Kind)
}
}

pods, err := clientSet.CoreV1().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
t.Fatalf("Failed to list pods: %v", err)
}

if len(pods.Items) != 1 {
return false, nil
}

for _, pod := range pods.Items {
ownerReferences := pod.ObjectMeta.OwnerReferences
if refCount := len(ownerReferences); refCount != 1 {
return false, fmt.Errorf("pod %s has %d OwnerReferences, expected only 1", pod.Name, refCount)
}

reference := ownerReferences[0]
if reference.Kind != "Job" {
return false, fmt.Errorf("pod %s has OwnerReference with Kind %s, expected Job", pod.Name, reference.Kind)
}
}
return true, nil
}); err != nil {
t.Fatal(err)
}
}

func TestCronJobLaunchesPodAndCleansUp(t *testing.T) {
server, closeFn, cjc, jc, informerSet, clientSet, _ := setup(t)
defer closeFn()

cronJobName := "foo"
namespaceName := "simple-cronjob-test"

ns := framework.CreateTestingNamespace(namespaceName, server, t)
defer framework.DeleteTestingNamespace(ns, server, t)

cjClient := clientSet.BatchV1beta1().CronJobs(ns.Name)

stopCh := make(chan struct{})
defer close(stopCh)

informerSet.Start(stopCh)
go cjc.Run(stopCh)
go jc.Run(1, stopCh)

_, err := cjClient.Create(newCronJob(cronJobName, ns.Name, "* * * * ?"))
if err != nil {
t.Fatalf("Failed to create CronJob: %v", err)
}
defer cleanupCronJobs(t, cjClient, cronJobName)

validateJobAndPod(t, clientSet, namespaceName)
}
26
vendor/k8s.io/kubernetes/test/integration/cronjob/main_test.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cronjob

import (
"k8s.io/kubernetes/test/integration/framework"
"testing"
)

func TestMain(m *testing.M) {
framework.EtcdMain(m.Run)
}
39
vendor/k8s.io/kubernetes/test/integration/daemonset/BUILD
generated
vendored
@ -16,30 +16,33 @@ go_test(
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/daemon:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithmprovider:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/factory:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/labels:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)

2
vendor/k8s.io/kubernetes/test/integration/daemonset/OWNERS
generated
vendored
@ -7,3 +7,5 @@ reviewers:
- lukaszo
- janetkuo
- kow3ns
labels:
- sig/apps
406
vendor/k8s.io/kubernetes/test/integration/daemonset/daemonset_test.go
generated
vendored
@ -39,16 +39,19 @@ import (
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/client-go/util/retry"
"k8s.io/kubernetes/pkg/api/legacyscheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/daemon"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/pkg/util/metrics"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
"k8s.io/kubernetes/test/integration/framework"
)

@ -65,13 +68,13 @@ func setup(t *testing.T) (*httptest.Server, framework.CloseFunc, *daemon.DaemonS
}
resyncPeriod := 12 * time.Hour
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "daemonset-informers")), resyncPeriod)
metrics.UnregisterMetricAndUntrackRateLimiterUsage("daemon_controller")
dc, err := daemon.NewDaemonSetsController(
informers.Apps().V1().DaemonSets(),
informers.Apps().V1().ControllerRevisions(),
informers.Core().V1().Pods(),
informers.Core().V1().Nodes(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "daemonset-controller")),
flowcontrol.NewBackOff(5*time.Second, 15*time.Minute),
)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
@ -91,23 +94,27 @@ func setupScheduler(
return
}

schedulerConfigFactory := factory.NewConfigFactory(
v1.DefaultSchedulerName,
cs,
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
informerFactory.Storage().V1().StorageClasses(),
v1.DefaultHardPodAffinitySymmetricWeight,
true,
false,
)
// Enable Features.
algorithmprovider.ApplyFeatureGates()

schedulerConfigFactory := factory.NewConfigFactory(&factory.ConfigFactoryArgs{
SchedulerName: v1.DefaultSchedulerName,
Client: cs,
NodeInformer: informerFactory.Core().V1().Nodes(),
PodInformer: informerFactory.Core().V1().Pods(),
PvInformer: informerFactory.Core().V1().PersistentVolumes(),
PvcInformer: informerFactory.Core().V1().PersistentVolumeClaims(),
ReplicationControllerInformer: informerFactory.Core().V1().ReplicationControllers(),
ReplicaSetInformer: informerFactory.Apps().V1().ReplicaSets(),
StatefulSetInformer: informerFactory.Apps().V1().StatefulSets(),
ServiceInformer: informerFactory.Core().V1().Services(),
PdbInformer: informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
StorageClassInformer: informerFactory.Storage().V1().StorageClasses(),
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
EnableEquivalenceClassCache: false,
DisablePreemption: false,
PercentageOfNodesToScore: 100,
})

schedulerConfig, err := schedulerConfigFactory.Create()
if err != nil {
@ -293,7 +300,8 @@ func validateDaemonSetPodsAndMarkReady(
podClient corev1typed.PodInterface,
podInformer cache.SharedIndexInformer,
numberPods int,
t *testing.T) {
t *testing.T,
) {
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
objects := podInformer.GetIndexer().List()
if len(objects) != numberPods {
@ -372,6 +380,52 @@ func waitForPodsCreated(podInformer cache.SharedIndexInformer, num int) error {
})
}

func waitForDaemonSetAndControllerRevisionCreated(c clientset.Interface, name string, namespace string) error {
return wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
ds, err := c.AppsV1().DaemonSets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
if ds == nil {
return false, nil
}

revs, err := c.AppsV1().ControllerRevisions(namespace).List(metav1.ListOptions{})
if err != nil {
return false, err
}
if revs.Size() == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
for _, rev := range revs.Items {
|
||||
for _, oref := range rev.OwnerReferences {
|
||||
if oref.Kind == "DaemonSet" && oref.UID == ds.UID {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
}
|
||||
|
||||
func hashAndNameForDaemonSet(ds *apps.DaemonSet) (string, string) {
|
||||
hash := fmt.Sprint(controller.ComputeHash(&ds.Spec.Template, ds.Status.CollisionCount))
|
||||
name := ds.Name + "-" + hash
|
||||
return hash, name
|
||||
}
|
||||
|
||||
func validateDaemonSetCollisionCount(dsClient appstyped.DaemonSetInterface, dsName string, expCount int32, t *testing.T) {
|
||||
ds, err := dsClient.Get(dsName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to look up DaemonSet: %v", err)
|
||||
}
|
||||
collisionCount := ds.Status.CollisionCount
|
||||
if *collisionCount != expCount {
|
||||
t.Fatalf("Expected collisionCount to be %d, but found %d", expCount, *collisionCount)
|
||||
}
|
||||
}
|
||||
|
||||
func validateDaemonSetStatus(
|
||||
dsClient appstyped.DaemonSetInterface,
|
||||
dsName string,
|
||||
@ -413,16 +467,36 @@ func validateFailedPlacementEvent(eventClient corev1typed.EventInterface, t *tes
|
||||
}
|
||||
}
|
||||
|
||||
func updateDS(t *testing.T, dsClient appstyped.DaemonSetInterface, dsName string, updateFunc func(*apps.DaemonSet)) *apps.DaemonSet {
|
||||
var ds *apps.DaemonSet
|
||||
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
newDS, err := dsClient.Get(dsName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
updateFunc(newDS)
|
||||
ds, err = dsClient.Update(newDS)
|
||||
return err
|
||||
}); err != nil {
|
||||
t.Fatalf("Failed to update DaemonSet: %v", err)
|
||||
}
|
||||
return ds
|
||||
}
|
||||
|
||||
func forEachFeatureGate(t *testing.T, tf func(t *testing.T)) {
|
||||
for _, fg := range featureGates() {
|
||||
func() {
|
||||
enabled := utilfeature.DefaultFeatureGate.Enabled(fg)
|
||||
defer func() {
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled))
|
||||
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled)); err != nil {
|
||||
t.Fatalf("Failed to set FeatureGate %v to %t", fg, enabled)
|
||||
}
|
||||
}()
|
||||
|
||||
for _, f := range []bool{true, false} {
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f))
|
||||
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f)); err != nil {
|
||||
t.Fatalf("Failed to set FeatureGate %v to %t", fg, f)
|
||||
}
|
||||
t.Run(fmt.Sprintf("%v (%t)", fg, f), tf)
|
||||
}
|
||||
}()
|
||||
@ -556,7 +630,7 @@ func TestDaemonSetWithNodeSelectorLaunchesPods(t *testing.T) {
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: algorithm.NodeFieldSelectorKeyNodeName,
|
||||
Key: schedulerapi.NodeFieldSelectorKeyNodeName,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"node-1"},
|
||||
},
|
||||
@ -630,7 +704,23 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func setFeatureGate(t *testing.T, feature utilfeature.Feature, enabled bool) {
|
||||
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", feature, enabled)); err != nil {
|
||||
t.Fatalf("Failed to set FeatureGate %v to %t: %v", feature, enabled, err)
|
||||
}
|
||||
}
|
||||
|
||||
// When ScheduleDaemonSetPods is disabled, DaemonSets should not launch onto nodes with insufficient capacity.
|
||||
// Look for TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled, we don't need this test anymore.
|
||||
func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
|
||||
enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
|
||||
// Rollback feature gate.
|
||||
defer func() {
|
||||
if enabled {
|
||||
setFeatureGate(t, features.ScheduleDaemonSetPods, true)
|
||||
}
|
||||
}()
|
||||
setFeatureGate(t, features.ScheduleDaemonSetPods, false)
|
||||
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
|
||||
server, closeFn, dc, informers, clientset := setup(t)
|
||||
defer closeFn()
|
||||
@ -673,11 +763,15 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
|
||||
func TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled(t *testing.T) {
|
||||
enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
|
||||
defer func() {
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
|
||||
features.ScheduleDaemonSetPods, enabled))
|
||||
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
|
||||
features.ScheduleDaemonSetPods, enabled)); err != nil {
|
||||
t.Fatalf("Failed to set FeatureGate %v to %t", features.ScheduleDaemonSetPods, enabled)
|
||||
}
|
||||
}()
|
||||
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.ScheduleDaemonSetPods, true))
|
||||
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.ScheduleDaemonSetPods, true)); err != nil {
|
||||
t.Fatalf("Failed to set FeatureGate %v to %t", features.ScheduleDaemonSetPods, true)
|
||||
}
|
||||
|
||||
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
|
||||
server, closeFn, dc, informers, clientset := setup(t)
|
||||
@ -740,3 +834,261 @@ func TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled(t *testing.T)
|
||||
validateDaemonSetStatus(dsClient, ds.Name, 1, t)
|
||||
})
|
||||
}
|
||||
|
||||
// TestLaunchWithHashCollision tests that a DaemonSet can be updated even if there is a
|
||||
// hash collision with an existing ControllerRevision
|
||||
func TestLaunchWithHashCollision(t *testing.T) {
|
||||
server, closeFn, dc, informers, clientset := setup(t)
|
||||
defer closeFn()
|
||||
ns := framework.CreateTestingNamespace("one-node-daemonset-test", server, t)
|
||||
defer framework.DeleteTestingNamespace(ns, server, t)
|
||||
|
||||
dsClient := clientset.AppsV1().DaemonSets(ns.Name)
|
||||
podInformer := informers.Core().V1().Pods().Informer()
|
||||
nodeClient := clientset.CoreV1().Nodes()
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
|
||||
informers.Start(stopCh)
|
||||
go dc.Run(1, stopCh)
|
||||
|
||||
setupScheduler(t, clientset, informers, stopCh)
|
||||
|
||||
// Create single node
|
||||
_, err := nodeClient.Create(newNode("single-node", nil))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create node: %v", err)
|
||||
}
|
||||
|
||||
// Create new DaemonSet with RollingUpdate strategy
|
||||
orgDs := newDaemonSet("foo", ns.Name)
|
||||
oneIntString := intstr.FromInt(1)
|
||||
orgDs.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{
|
||||
Type: apps.RollingUpdateDaemonSetStrategyType,
|
||||
RollingUpdate: &apps.RollingUpdateDaemonSet{
|
||||
MaxUnavailable: &oneIntString,
|
||||
},
|
||||
}
|
||||
ds, err := dsClient.Create(orgDs)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create DaemonSet: %v", err)
|
||||
}
|
||||
|
||||
// Wait for the DaemonSet to be created before proceeding
|
||||
err = waitForDaemonSetAndControllerRevisionCreated(clientset, ds.Name, ds.Namespace)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create DaemonSet: %v", err)
|
||||
}
|
||||
|
||||
ds, err = dsClient.Get(ds.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get DaemonSet: %v", err)
|
||||
}
|
||||
var orgCollisionCount int32
|
||||
if ds.Status.CollisionCount != nil {
|
||||
orgCollisionCount = *ds.Status.CollisionCount
|
||||
}
|
||||
|
||||
// Look up the ControllerRevision for the DaemonSet
|
||||
_, name := hashAndNameForDaemonSet(ds)
|
||||
revision, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil || revision == nil {
|
||||
t.Fatalf("Failed to look up ControllerRevision: %v", err)
|
||||
}
|
||||
|
||||
// Create a "fake" ControllerRevision that we know will create a hash collision when we make
|
||||
// the next update
|
||||
one := int64(1)
|
||||
ds.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
|
||||
|
||||
newHash, newName := hashAndNameForDaemonSet(ds)
|
||||
newRevision := &apps.ControllerRevision{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: newName,
|
||||
Namespace: ds.Namespace,
|
||||
Labels: labelsutil.CloneAndAddLabel(ds.Spec.Template.Labels, apps.DefaultDaemonSetUniqueLabelKey, newHash),
|
||||
Annotations: ds.Annotations,
|
||||
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ds, apps.SchemeGroupVersion.WithKind("DaemonSet"))},
|
||||
},
|
||||
Data: revision.Data,
|
||||
Revision: revision.Revision + 1,
|
||||
}
|
||||
_, err = clientset.AppsV1().ControllerRevisions(ds.Namespace).Create(newRevision)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create ControllerRevision: %v", err)
|
||||
}
|
||||
|
||||
// Make an update of the DaemonSet which we know will create a hash collision when
|
||||
// the next ControllerRevision is created.
|
||||
ds = updateDS(t, dsClient, ds.Name, func(updateDS *apps.DaemonSet) {
|
||||
updateDS.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
|
||||
})
|
||||
|
||||
// Wait for any pod with the latest Spec to exist
|
||||
err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
|
||||
objects := podInformer.GetIndexer().List()
|
||||
for _, object := range objects {
|
||||
pod := object.(*v1.Pod)
|
||||
if *pod.Spec.TerminationGracePeriodSeconds == *ds.Spec.Template.Spec.TerminationGracePeriodSeconds {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to wait for Pods with the latest Spec to be created: %v", err)
|
||||
}
|
||||
|
||||
validateDaemonSetCollisionCount(dsClient, ds.Name, orgCollisionCount+1, t)
|
||||
}
|
||||
|
||||
// TestTaintedNode tests that no matter "ScheduleDaemonSetPods" feature is enabled or not
|
||||
// tainted node isn't expected to have pod scheduled
|
||||
func TestTaintedNode(t *testing.T) {
|
||||
forEachFeatureGate(t, func(t *testing.T) {
|
||||
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
|
||||
server, closeFn, dc, informers, clientset := setup(t)
|
||||
defer closeFn()
|
||||
ns := framework.CreateTestingNamespace("tainted-node", server, t)
|
||||
defer framework.DeleteTestingNamespace(ns, server, t)
|
||||
|
||||
dsClient := clientset.AppsV1().DaemonSets(ns.Name)
|
||||
podClient := clientset.CoreV1().Pods(ns.Name)
|
||||
podInformer := informers.Core().V1().Pods().Informer()
|
||||
nodeClient := clientset.CoreV1().Nodes()
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
|
||||
informers.Start(stopCh)
|
||||
go dc.Run(5, stopCh)
|
||||
|
||||
// Start Scheduler
|
||||
setupScheduler(t, clientset, informers, stopCh)
|
||||
|
||||
ds := newDaemonSet("foo", ns.Name)
|
||||
ds.Spec.UpdateStrategy = *strategy
|
||||
ds, err := dsClient.Create(ds)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create DaemonSet: %v", err)
|
||||
}
|
||||
|
||||
defer cleanupDaemonSets(t, clientset, ds)
|
||||
|
||||
nodeWithTaint := newNode("node-with-taint", nil)
|
||||
nodeWithTaint.Spec.Taints = []v1.Taint{{Key: "key1", Value: "val1", Effect: "NoSchedule"}}
|
||||
_, err = nodeClient.Create(nodeWithTaint)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create nodeWithTaint: %v", err)
|
||||
}
|
||||
|
||||
nodeWithoutTaint := newNode("node-without-taint", nil)
|
||||
_, err = nodeClient.Create(nodeWithoutTaint)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create nodeWithoutTaint: %v", err)
|
||||
}
|
||||
|
||||
validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t)
|
||||
validateDaemonSetStatus(dsClient, ds.Name, 1, t)
|
||||
|
||||
// remove taint from nodeWithTaint
|
||||
nodeWithTaint, err = nodeClient.Get("node-with-taint", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to retrieve nodeWithTaint: %v", err)
|
||||
}
|
||||
nodeWithTaintCopy := nodeWithTaint.DeepCopy()
|
||||
nodeWithTaintCopy.Spec.Taints = []v1.Taint{}
|
||||
_, err = nodeClient.Update(nodeWithTaintCopy)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to update nodeWithTaint: %v", err)
|
||||
}
|
||||
|
||||
validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t)
|
||||
validateDaemonSetStatus(dsClient, ds.Name, 2, t)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// TestUnschedulableNodeDaemonDoesLaunchPod tests that the DaemonSet Pods can still be scheduled
|
||||
// to the Unschedulable nodes when TaintNodesByCondition are enabled.
|
||||
func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
|
||||
enabledTaint := utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition)
|
||||
defer func() {
|
||||
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
|
||||
features.TaintNodesByCondition, enabledTaint)); err != nil {
|
||||
t.Fatalf("Failed to set FeatureGate %v to %t", features.TaintNodesByCondition, enabledTaint)
|
||||
}
|
||||
}()
|
||||
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.TaintNodesByCondition, true)); err != nil {
|
||||
t.Fatalf("Failed to set FeatureGate %v to %t", features.TaintNodesByCondition, true)
|
||||
}
|
||||
|
||||
forEachFeatureGate(t, func(t *testing.T) {
|
||||
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
|
||||
server, closeFn, dc, informers, clientset := setup(t)
|
||||
defer closeFn()
|
||||
ns := framework.CreateTestingNamespace("daemonset-unschedulable-test", server, t)
|
||||
defer framework.DeleteTestingNamespace(ns, server, t)
|
||||
|
||||
dsClient := clientset.AppsV1().DaemonSets(ns.Name)
|
||||
podClient := clientset.CoreV1().Pods(ns.Name)
|
||||
nodeClient := clientset.CoreV1().Nodes()
|
||||
podInformer := informers.Core().V1().Pods().Informer()
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
|
||||
informers.Start(stopCh)
|
||||
go dc.Run(5, stopCh)
|
||||
|
||||
// Start Scheduler
|
||||
setupScheduler(t, clientset, informers, stopCh)
|
||||
|
||||
ds := newDaemonSet("foo", ns.Name)
|
||||
ds.Spec.UpdateStrategy = *strategy
|
||||
ds.Spec.Template.Spec.HostNetwork = true
|
||||
_, err := dsClient.Create(ds)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create DaemonSet: %v", err)
|
||||
}
|
||||
|
||||
defer cleanupDaemonSets(t, clientset, ds)
|
||||
|
||||
// Creates unschedulable node.
|
||||
node := newNode("unschedulable-node", nil)
|
||||
node.Spec.Unschedulable = true
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: schedulerapi.TaintNodeUnschedulable,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
|
||||
_, err = nodeClient.Create(node)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create node: %v", err)
|
||||
}
|
||||
|
||||
// Creates network-unavailable node.
|
||||
nodeNU := newNode("network-unavailable-node", nil)
|
||||
nodeNU.Status.Conditions = []v1.NodeCondition{
|
||||
{Type: v1.NodeReady, Status: v1.ConditionFalse},
|
||||
{Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue},
|
||||
}
|
||||
nodeNU.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: schedulerapi.TaintNodeNetworkUnavailable,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
|
||||
_, err = nodeClient.Create(nodeNU)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create node: %v", err)
|
||||
}
|
||||
|
||||
validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t)
|
||||
validateDaemonSetStatus(dsClient, ds.Name, 2, t)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
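The daemonset tests above repeatedly save, flip, and restore utilfeature.DefaultFeatureGate around each subtest (see setFeatureGate and forEachFeatureGate). Below is a stand-alone sketch of that save/flip/restore pattern, reduced to a plain map so it runs without the Kubernetes tree; the gates map and the withGate helper are inventions of this sketch, not part of the vendored code.

// --- illustrative sketch, not part of the diff ---
package main

import "fmt"

// gates stands in for utilfeature.DefaultFeatureGate (assumption: the real
// code goes through Set("Gate=bool") and Enabled(gate), not a bare map).
var gates = map[string]bool{"ScheduleDaemonSetPods": false}

// withGate saves the current setting, flips it for the duration of fn,
// and restores it afterwards, mirroring the defer-based restore above.
func withGate(name string, value bool, fn func()) {
	saved := gates[name]
	defer func() { gates[name] = saved }()
	gates[name] = value
	fn()
}

func main() {
	withGate("ScheduleDaemonSetPods", true, func() {
		fmt.Println("enabled inside:", gates["ScheduleDaemonSetPods"])
	})
	fmt.Println("restored after:", gates["ScheduleDaemonSetPods"])
}
// --- end sketch ---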
12
vendor/k8s.io/kubernetes/test/integration/defaulttolerationseconds/BUILD
generated
vendored
@ -18,14 +18,14 @@ go_test(
],
deps = [
"//pkg/apis/core/helper:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//plugin/pkg/admission/defaulttolerationseconds:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)

@ -25,7 +25,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/apis/core/helper"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds"
"k8s.io/kubernetes/test/integration/framework"
)
@ -64,14 +64,14 @@ func TestAdmission(t *testing.T) {

var defaultSeconds int64 = 300
nodeNotReady := v1.Toleration{
Key: algorithm.TaintNodeNotReady,
Key: schedulerapi.TaintNodeNotReady,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
TolerationSeconds: &defaultSeconds,
}

nodeUnreachable := v1.Toleration{
Key: algorithm.TaintNodeUnreachable,
Key: schedulerapi.TaintNodeUnreachable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
TolerationSeconds: &defaultSeconds,
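For context on the assertions above: the defaulttolerationseconds admission plugin is expected to add two Exists-style tolerations with a 300-second grace period. A minimal stand-alone sketch of that expected shape follows; the struct and the literal taint keys are assumptions of this sketch, standing in for the schedulerapi.TaintNodeNotReady and schedulerapi.TaintNodeUnreachable constants.

// --- illustrative sketch, not part of the diff ---
package main

import "fmt"

// toleration is a cut-down stand-in for v1.Toleration.
type toleration struct {
	Key     string
	Op      string
	Effect  string
	Seconds int64
}

// defaultTolerations mirrors the two entries the admission test expects
// (assumption: the keys match the scheduler API taint-key constants).
func defaultTolerations() []toleration {
	const defaultSeconds = 300
	return []toleration{
		{Key: "node.kubernetes.io/not-ready", Op: "Exists", Effect: "NoExecute", Seconds: defaultSeconds},
		{Key: "node.kubernetes.io/unreachable", Op: "Exists", Effect: "NoExecute", Seconds: defaultSeconds},
	}
}

func main() {
	for _, tol := range defaultTolerations() {
		fmt.Printf("%+v\n", tol)
	}
}
// --- end sketch ---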
35
vendor/k8s.io/kubernetes/test/integration/deployment/BUILD
generated
vendored
@ -17,15 +17,15 @@ go_test(
tags = ["integration"],
deps = [
"//pkg/controller/deployment/util:go_default_library",
"//pkg/util/pointer:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)

@ -38,18 +38,17 @@ go_library(
"//pkg/controller/deployment:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/replicaset:go_default_library",
"//pkg/util/metrics:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//test/integration/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)

2
vendor/k8s.io/kubernetes/test/integration/deployment/OWNERS
generated
vendored
@ -5,3 +5,5 @@ reviewers:
approvers:
- janetkuo
- kargakis
labels:
- sig/apps
14
vendor/k8s.io/kubernetes/test/integration/deployment/deployment_test.go
generated
vendored
@ -30,8 +30,8 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/retry"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/util/pointer"
"k8s.io/kubernetes/test/integration/framework"
"k8s.io/utils/pointer"
)

func TestNewDeployment(t *testing.T) {
@ -681,6 +681,10 @@ func checkRSHashLabels(rs *apps.ReplicaSet) (string, error) {
return "", fmt.Errorf("unexpected replicaset %s missing required pod-template-hash labels", rs.Name)
}

if !strings.HasSuffix(rs.Name, hash) {
return "", fmt.Errorf("unexpected replicaset %s name suffix doesn't match hash %s", rs.Name, hash)
}

return hash, nil
}

@ -1265,9 +1269,7 @@ func testScalingUsingScaleSubresource(t *testing.T, tester *deploymentTester, re
if err != nil {
t.Fatalf("Failed to obtain deployment %q: %v", deploymentName, err)
}
kind := "Deployment"
scaleClient := tester.c.ExtensionsV1beta1().Scales(ns)
scale, err := scaleClient.Get(kind, deploymentName)
scale, err := tester.c.AppsV1().Deployments(ns).GetScale(deploymentName, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to obtain scale subresource for deployment %q: %v", deploymentName, err)
}
@ -1276,12 +1278,12 @@ func testScalingUsingScaleSubresource(t *testing.T, tester *deploymentTester, re
}

if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
scale, err := scaleClient.Get(kind, deploymentName)
scale, err := tester.c.AppsV1().Deployments(ns).GetScale(deploymentName, metav1.GetOptions{})
if err != nil {
return err
}
scale.Spec.Replicas = replicas
_, err = scaleClient.Update(kind, scale)
_, err = tester.c.AppsV1().Deployments(ns).UpdateScale(deploymentName, scale)
return err
}); err != nil {
t.Fatalf("Failed to set .Spec.Replicas of scale subresource for deployment %q: %v", deploymentName, err)
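The scale-subresource update above is wrapped in retry.RetryOnConflict, so a stale resourceVersion just triggers another read-modify-write attempt. A self-contained sketch of that retry shape follows; the sentinel error and the fixed five attempts are assumptions of the sketch, standing in for retry.DefaultBackoff and the API server's conflict errors.

// --- illustrative sketch, not part of the diff ---
package main

import (
	"errors"
	"fmt"
)

var errConflict = errors.New("conflict")

// retryOnConflict re-runs fn while it keeps failing with a conflict,
// up to a bounded number of attempts (assumption: 5 steps).
func retryOnConflict(fn func() error) error {
	var err error
	for i := 0; i < 5; i++ {
		if err = fn(); !errors.Is(err, errConflict) {
			return err
		}
	}
	return err
}

func main() {
	attempts := 0
	err := retryOnConflict(func() error {
		attempts++
		if attempts < 3 {
			return errConflict // simulate a stale resourceVersion
		}
		return nil // the read-modify-write finally succeeded
	})
	fmt.Println("attempts:", attempts, "err:", err)
}
// --- end sketch ---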
4
vendor/k8s.io/kubernetes/test/integration/deployment/util.go
generated
vendored
@ -36,7 +36,6 @@ import (
"k8s.io/kubernetes/pkg/controller/deployment"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/pkg/util/metrics"
"k8s.io/kubernetes/test/integration/framework"
testutil "k8s.io/kubernetes/test/utils"
)
@ -67,7 +66,7 @@ func testLabels() map[string]string {
return map[string]string{"name": "test"}
}

// newDeployment returns a RollingUpdate Deployment with with a fake container image
// newDeployment returns a RollingUpdate Deployment with a fake container image
func newDeployment(name, ns string, replicas int32) *apps.Deployment {
return &apps.Deployment{
TypeMeta: metav1.TypeMeta{
@ -156,7 +155,6 @@ func dcSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replicaset.R
resyncPeriod := 12 * time.Hour
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-informers")), resyncPeriod)

metrics.UnregisterMetricAndUntrackRateLimiterUsage("deployment_controller")
dc, err := deployment.NewDeploymentController(
informers.Apps().V1().Deployments(),
informers.Apps().V1().ReplicaSets(),
47
vendor/k8s.io/kubernetes/test/integration/dryrun/BUILD
generated
vendored
Normal file
@ -0,0 +1,47 @@
package(default_visibility = ["//visibility:public"])

load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
)

go_test(
name = "go_default_test",
size = "large",
srcs = [
"dryrun_test.go",
"main_test.go",
],
tags = [
"etcd",
"integration",
],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/features:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//test/integration/etcd:go_default_library",
"//test/integration/framework:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
7
vendor/k8s.io/kubernetes/test/integration/dryrun/OWNERS
generated
vendored
Normal file
@ -0,0 +1,7 @@
approvers:
- apelisse
- deads2k
reviewers:
- deads2k
- liggitt
- lavalamp
276
vendor/k8s.io/kubernetes/test/integration/dryrun/dryrun_test.go
generated
vendored
Normal file
@ -0,0 +1,276 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package dryrun

import (
"testing"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/features"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/client-go/dynamic"
"k8s.io/kubernetes/test/integration/etcd"
)

// Only add kinds to this list when this is a virtual resource with get and create verbs that doesn't actually
// store into its kind. We've used this downstream for mappings before.
var kindWhiteList = sets.NewString()

// namespace used for all tests, do not change this
const testNamespace = "dryrunnamespace"

func DryRunCreateTest(t *testing.T, rsc dynamic.ResourceInterface, obj *unstructured.Unstructured, gvResource schema.GroupVersionResource) {
createdObj, err := rsc.Create(obj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
if err != nil {
t.Fatalf("failed to dry-run create stub for %s: %#v", gvResource, err)
}
if obj.GroupVersionKind() != createdObj.GroupVersionKind() {
t.Fatalf("created object doesn't have the same gvk as original object: got %v, expected %v",
createdObj.GroupVersionKind(),
obj.GroupVersionKind())
}

if _, err := rsc.Get(obj.GetName(), metav1.GetOptions{}); !errors.IsNotFound(err) {
t.Fatalf("object shouldn't exist: %v", err)
}
}

func DryRunPatchTest(t *testing.T, rsc dynamic.ResourceInterface, name string) {
patch := []byte(`{"metadata":{"annotations":{"patch": "true"}}}`)
obj, err := rsc.Patch(name, types.MergePatchType, patch, metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}})
if err != nil {
t.Fatalf("failed to dry-run patch object: %v", err)
}
if v := obj.GetAnnotations()["patch"]; v != "true" {
t.Fatalf("dry-run patched annotations should be returned, got: %v", obj.GetAnnotations())
}
obj, err = rsc.Get(obj.GetName(), metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get object: %v", err)
}
if v := obj.GetAnnotations()["patch"]; v == "true" {
t.Fatalf("dry-run patched annotations should not be persisted, got: %v", obj.GetAnnotations())
}
}

func getReplicasOrFail(t *testing.T, obj *unstructured.Unstructured) int64 {
t.Helper()
replicas, found, err := unstructured.NestedInt64(obj.UnstructuredContent(), "spec", "replicas")
if err != nil {
t.Fatalf("failed to get int64 for replicas: %v", err)
}
if !found {
t.Fatal("object doesn't have spec.replicas")
}
return replicas
}

func DryRunScalePatchTest(t *testing.T, rsc dynamic.ResourceInterface, name string) {
obj, err := rsc.Get(name, metav1.GetOptions{}, "scale")
if errors.IsNotFound(err) {
return
}
if err != nil {
t.Fatalf("failed to get object: %v", err)
}

replicas := getReplicasOrFail(t, obj)
patch := []byte(`{"spec":{"replicas":10}}`)
patchedObj, err := rsc.Patch(name, types.MergePatchType, patch, metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}}, "scale")
if err != nil {
t.Fatalf("failed to dry-run patch object: %v", err)
}
if newReplicas := getReplicasOrFail(t, patchedObj); newReplicas != 10 {
t.Fatalf("dry-run patch to replicas didn't return new value: %v", newReplicas)
}
persistedObj, err := rsc.Get(name, metav1.GetOptions{}, "scale")
if err != nil {
t.Fatalf("failed to get scale sub-resource")
}
if newReplicas := getReplicasOrFail(t, persistedObj); newReplicas != replicas {
t.Fatalf("number of replicas changed, expected %v, got %v", replicas, newReplicas)
}
}

func DryRunScaleUpdateTest(t *testing.T, rsc dynamic.ResourceInterface, name string) {
obj, err := rsc.Get(name, metav1.GetOptions{}, "scale")
if errors.IsNotFound(err) {
return
}
if err != nil {
t.Fatalf("failed to get object: %v", err)
}

replicas := getReplicasOrFail(t, obj)
if err := unstructured.SetNestedField(obj.Object, int64(10), "spec", "replicas"); err != nil {
t.Fatalf("failed to set spec.replicas: %v", err)
}
updatedObj, err := rsc.Update(obj, metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}}, "scale")
if err != nil {
t.Fatalf("failed to dry-run update scale sub-resource: %v", err)
}
if newReplicas := getReplicasOrFail(t, updatedObj); newReplicas != 10 {
t.Fatalf("dry-run update to replicas didn't return new value: %v", newReplicas)
}
persistedObj, err := rsc.Get(name, metav1.GetOptions{}, "scale")
if err != nil {
t.Fatalf("failed to get scale sub-resource")
}
if newReplicas := getReplicasOrFail(t, persistedObj); newReplicas != replicas {
t.Fatalf("number of replicas changed, expected %v, got %v", replicas, newReplicas)
}
}

func DryRunUpdateTest(t *testing.T, rsc dynamic.ResourceInterface, name string) {
var err error
var obj *unstructured.Unstructured
for i := 0; i < 3; i++ {
obj, err = rsc.Get(name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to retrieve object: %v", err)
}
obj.SetAnnotations(map[string]string{"update": "true"})
obj, err = rsc.Update(obj, metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}})
if err == nil || !errors.IsConflict(err) {
break
}
}
if err != nil {
t.Fatalf("failed to dry-run update resource: %v", err)
}
if v := obj.GetAnnotations()["update"]; v != "true" {
t.Fatalf("dry-run updated annotations should be returned, got: %v", obj.GetAnnotations())
}

obj, err = rsc.Get(obj.GetName(), metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get object: %v", err)
}
if v := obj.GetAnnotations()["update"]; v == "true" {
t.Fatalf("dry-run updated annotations should not be persisted, got: %v", obj.GetAnnotations())
}
}

func DryRunDeleteCollectionTest(t *testing.T, rsc dynamic.ResourceInterface, name string) {
err := rsc.DeleteCollection(&metav1.DeleteOptions{DryRun: []string{metav1.DryRunAll}}, metav1.ListOptions{})
if err != nil {
t.Fatalf("dry-run delete collection failed: %v", err)
}
obj, err := rsc.Get(name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get object: %v", err)
}
ts := obj.GetDeletionTimestamp()
if ts != nil {
t.Fatalf("object has a deletion timestamp after dry-run delete collection")
}
}

func DryRunDeleteTest(t *testing.T, rsc dynamic.ResourceInterface, name string) {
err := rsc.Delete(name, &metav1.DeleteOptions{DryRun: []string{metav1.DryRunAll}})
if err != nil {
t.Fatalf("dry-run delete failed: %v", err)
}
obj, err := rsc.Get(name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get object: %v", err)
}
ts := obj.GetDeletionTimestamp()
if ts != nil {
t.Fatalf("object has a deletion timestamp after dry-run delete")
}
}

// TestDryRun tests dry-run on all types.
func TestDryRun(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DryRun, true)()

master := etcd.StartRealMasterOrDie(t)
defer master.Cleanup()

if _, err := master.Client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil {
t.Fatal(err)
}

dryrunData := etcd.GetEtcdStorageData()

// dry run specific stub overrides
for resource, stub := range map[schema.GroupVersionResource]string{
// need to change event's namespace field to match dry run test
gvr("", "v1", "events"): `{"involvedObject": {"namespace": "dryrunnamespace"}, "message": "some data here", "metadata": {"name": "event1"}}`,
} {
data := dryrunData[resource]
data.Stub = stub
dryrunData[resource] = data
}

for _, resourceToTest := range master.Resources {
t.Run(resourceToTest.Mapping.Resource.String(), func(t *testing.T) {
mapping := resourceToTest.Mapping
gvk := resourceToTest.Mapping.GroupVersionKind
gvResource := resourceToTest.Mapping.Resource
kind := gvk.Kind

if kindWhiteList.Has(kind) {
t.Skip("whitelisted")
}

testData, hasTest := dryrunData[gvResource]

if !hasTest {
t.Fatalf("no test data for %s. Please add a test for your new type to etcd.GetEtcdStorageData().", gvResource)
}

rsc, obj, err := etcd.JSONToUnstructured(testData.Stub, testNamespace, mapping, master.Dynamic)
if err != nil {
t.Fatalf("failed to unmarshal stub (%v): %v", testData.Stub, err)
}

name := obj.GetName()

DryRunCreateTest(t, rsc, obj, gvResource)

if _, err := rsc.Create(obj, metav1.CreateOptions{}); err != nil {
t.Fatalf("failed to create stub for %s: %#v", gvResource, err)
}

DryRunUpdateTest(t, rsc, name)
DryRunPatchTest(t, rsc, name)
DryRunScalePatchTest(t, rsc, name)
DryRunScaleUpdateTest(t, rsc, name)
if resourceToTest.HasDeleteCollection {
DryRunDeleteCollectionTest(t, rsc, name)
}
DryRunDeleteTest(t, rsc, name)

if err = rsc.Delete(obj.GetName(), metav1.NewDeleteOptions(0)); err != nil {
t.Fatalf("deleting final object failed: %v", err)
}
})
}
}

func gvr(g, v, r string) schema.GroupVersionResource {
return schema.GroupVersionResource{Group: g, Version: v, Resource: r}
}
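The dry-run tests above all assert one contract: a request carrying DryRunAll returns the object that would result, but nothing is persisted. A toy in-memory sketch of that contract follows; the store type and create method are inventions of this sketch, not client-go API.

// --- illustrative sketch, not part of the diff ---
package main

import "fmt"

// store models a registry keyed by object name.
type store map[string]string

// create returns the would-be result either way, but only persists the
// object when dryRun is false, mirroring the assertions in the tests above.
func (s store) create(name, data string, dryRun bool) string {
	if !dryRun {
		s[name] = data
	}
	return data
}

func main() {
	s := store{}
	out := s.create("cm1", `{"data":{"foo":"bar"}}`, true)
	fmt.Println("returned:", out)
	if _, ok := s["cm1"]; !ok {
		fmt.Println("object was not persisted, as the dry-run tests assert")
	}
}
// --- end sketch ---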
27
vendor/k8s.io/kubernetes/test/integration/dryrun/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package dryrun

import (
"testing"

"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
framework.EtcdMain(m.Run)
}
70
vendor/k8s.io/kubernetes/test/integration/etcd/BUILD
generated
vendored
@ -1,9 +1,6 @@
package(default_visibility = ["//visibility:public"])

load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_test(
name = "go_default_test",
@ -12,36 +9,23 @@ go_test(
"etcd_storage_path_test.go",
"main_test.go",
],
embed = [":go_default_library"],
tags = [
"etcd",
"integration",
],
deps = [
"//cmd/kube-apiserver/app:go_default_library",
"//cmd/kube-apiserver/app/options:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/master:go_default_library",
"//test/integration:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/options:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
"//vendor/k8s.io/client-go/discovery/cached:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/restmapper:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
],
)

@ -57,3 +41,35 @@ filegroup(
srcs = [":package-srcs"],
tags = ["automanaged"],
)

go_library(
name = "go_default_library",
srcs = [
"data.go",
"server.go",
],
importpath = "k8s.io/kubernetes/test/integration/etcd",
deps = [
"//cmd/kube-apiserver/app:go_default_library",
"//cmd/kube-apiserver/app/options:go_default_library",
"//pkg/master:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library",
"//staging/src/k8s.io/client-go/discovery/cached:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
"//test/integration:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/github.com/coreos/etcd/clientv3/concurrency:go_default_library",
],
)
6
vendor/k8s.io/kubernetes/test/integration/etcd/OWNERS
generated
vendored
Executable file
@ -0,0 +1,6 @@
approvers:
- enj
reviewers:
- deads2k
- liggitt
- enj
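The data.go file added below pairs each resource stub with an ExpectedEtcdPath. A small sketch of the path convention those entries encode follows: namespaced objects live under /registry/<prefix>/<namespace>/<name>, cluster-scoped ones under /registry/<prefix>/<name>, and a few kinds keep legacy prefixes ("minions" for nodes, "controllers" for replication controllers). The helper here is illustrative, not part of the vendored code.

// --- illustrative sketch, not part of the diff ---
package main

import "fmt"

// expectedEtcdPath mirrors the convention visible in GetEtcdStorageData.
// Assumption for the sketch: the caller supplies the storage prefix, since
// it is not always the plural resource name (see the legacy prefixes above).
func expectedEtcdPath(prefix, namespace, name string) string {
	if namespace == "" {
		return fmt.Sprintf("/registry/%s/%s", prefix, name)
	}
	return fmt.Sprintf("/registry/%s/%s/%s", prefix, namespace, name)
}

func main() {
	fmt.Println(expectedEtcdPath("configmaps", "etcdstoragepathtestnamespace", "cm1"))
	fmt.Println(expectedEtcdPath("minions", "", "node1")) // nodes use the legacy "minions" prefix
}
// --- end sketch ---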
559
vendor/k8s.io/kubernetes/test/integration/etcd/data.go
generated
vendored
Normal file
@ -0,0 +1,559 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package etcd

import (
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)

// GetEtcdStorageData returns etcd data for all persisted objects.
// It is exported so that it can be reused across multiple tests.
// It returns a new map on every invocation to prevent different tests from mutating shared state.
func GetEtcdStorageData() map[schema.GroupVersionResource]StorageData {
return map[schema.GroupVersionResource]StorageData{
// k8s.io/kubernetes/pkg/api/v1
gvr("", "v1", "configmaps"): {
Stub: `{"data": {"foo": "bar"}, "metadata": {"name": "cm1"}}`,
ExpectedEtcdPath: "/registry/configmaps/etcdstoragepathtestnamespace/cm1",
},
gvr("", "v1", "services"): {
Stub: `{"metadata": {"name": "service1"}, "spec": {"externalName": "service1name", "ports": [{"port": 10000, "targetPort": 11000}], "selector": {"test": "data"}}}`,
ExpectedEtcdPath: "/registry/services/specs/etcdstoragepathtestnamespace/service1",
},
gvr("", "v1", "podtemplates"): {
Stub: `{"metadata": {"name": "pt1name"}, "template": {"metadata": {"labels": {"pt": "01"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container9"}]}}}`,
ExpectedEtcdPath: "/registry/podtemplates/etcdstoragepathtestnamespace/pt1name",
},
gvr("", "v1", "pods"): {
Stub: `{"metadata": {"name": "pod1"}, "spec": {"containers": [{"image": "fedora:latest", "name": "container7", "resources": {"limits": {"cpu": "1M"}, "requests": {"cpu": "1M"}}}]}}`,
ExpectedEtcdPath: "/registry/pods/etcdstoragepathtestnamespace/pod1",
},
gvr("", "v1", "endpoints"): {
Stub: `{"metadata": {"name": "ep1name"}, "subsets": [{"addresses": [{"hostname": "bar-001", "ip": "192.168.3.1"}], "ports": [{"port": 8000}]}]}`,
ExpectedEtcdPath: "/registry/services/endpoints/etcdstoragepathtestnamespace/ep1name",
},
gvr("", "v1", "resourcequotas"): {
Stub: `{"metadata": {"name": "rq1name"}, "spec": {"hard": {"cpu": "5M"}}}`,
ExpectedEtcdPath: "/registry/resourcequotas/etcdstoragepathtestnamespace/rq1name",
},
gvr("", "v1", "limitranges"): {
Stub: `{"metadata": {"name": "lr1name"}, "spec": {"limits": [{"type": "Pod"}]}}`,
ExpectedEtcdPath: "/registry/limitranges/etcdstoragepathtestnamespace/lr1name",
},
gvr("", "v1", "namespaces"): {
Stub: `{"metadata": {"name": "namespace1"}, "spec": {"finalizers": ["kubernetes"]}}`,
ExpectedEtcdPath: "/registry/namespaces/namespace1",
},
gvr("", "v1", "nodes"): {
Stub: `{"metadata": {"name": "node1"}, "spec": {"unschedulable": true}}`,
ExpectedEtcdPath: "/registry/minions/node1",
},
gvr("", "v1", "persistentvolumes"): {
Stub: `{"metadata": {"name": "pv1name"}, "spec": {"accessModes": ["ReadWriteOnce"], "capacity": {"storage": "3M"}, "hostPath": {"path": "/tmp/test/"}}}`,
ExpectedEtcdPath: "/registry/persistentvolumes/pv1name",
},
gvr("", "v1", "events"): {
Stub: `{"involvedObject": {"namespace": "etcdstoragepathtestnamespace"}, "message": "some data here", "metadata": {"name": "event1"}}`,
ExpectedEtcdPath: "/registry/events/etcdstoragepathtestnamespace/event1",
},
gvr("", "v1", "persistentvolumeclaims"): {
Stub: `{"metadata": {"name": "pvc1"}, "spec": {"accessModes": ["ReadWriteOnce"], "resources": {"limits": {"storage": "1M"}, "requests": {"storage": "2M"}}, "selector": {"matchLabels": {"pvc": "stuff"}}}}`,
ExpectedEtcdPath: "/registry/persistentvolumeclaims/etcdstoragepathtestnamespace/pvc1",
},
gvr("", "v1", "serviceaccounts"): {
Stub: `{"metadata": {"name": "sa1name"}, "secrets": [{"name": "secret00"}]}`,
ExpectedEtcdPath: "/registry/serviceaccounts/etcdstoragepathtestnamespace/sa1name",
},
gvr("", "v1", "secrets"): {
Stub: `{"data": {"key": "ZGF0YSBmaWxl"}, "metadata": {"name": "secret1"}}`,
ExpectedEtcdPath: "/registry/secrets/etcdstoragepathtestnamespace/secret1",
},
gvr("", "v1", "replicationcontrollers"): {
Stub: `{"metadata": {"name": "rc1"}, "spec": {"selector": {"new": "stuff"}, "template": {"metadata": {"labels": {"new": "stuff"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container8"}]}}}}`,
ExpectedEtcdPath: "/registry/controllers/etcdstoragepathtestnamespace/rc1",
},
// --

// k8s.io/kubernetes/pkg/apis/apps/v1beta1
gvr("apps", "v1beta1", "statefulsets"): {
Stub: `{"metadata": {"name": "ss1"}, "spec": {"selector": {"matchLabels": {"a": "b"}}, "template": {"metadata": {"labels": {"a": "b"}}}}}`,
ExpectedEtcdPath: "/registry/statefulsets/etcdstoragepathtestnamespace/ss1",
ExpectedGVK: gvkP("apps", "v1", "StatefulSet"),
},
gvr("apps", "v1beta1", "deployments"): {
Stub: `{"metadata": {"name": "deployment2"}, "spec": {"selector": {"matchLabels": {"f": "z"}}, "template": {"metadata": {"labels": {"f": "z"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container6"}]}}}}`,
ExpectedEtcdPath: "/registry/deployments/etcdstoragepathtestnamespace/deployment2",
ExpectedGVK: gvkP("apps", "v1", "Deployment"),
},
gvr("apps", "v1beta1", "controllerrevisions"): {
Stub: `{"metadata":{"name":"crs1"},"data":{"name":"abc","namespace":"default","creationTimestamp":null,"Spec":{"Replicas":0,"Selector":{"matchLabels":{"foo":"bar"}},"Template":{"creationTimestamp":null,"labels":{"foo":"bar"},"Spec":{"Volumes":null,"InitContainers":null,"Containers":null,"RestartPolicy":"Always","TerminationGracePeriodSeconds":null,"ActiveDeadlineSeconds":null,"DNSPolicy":"ClusterFirst","NodeSelector":null,"ServiceAccountName":"","AutomountServiceAccountToken":null,"NodeName":"","SecurityContext":null,"ImagePullSecrets":null,"Hostname":"","Subdomain":"","Affinity":null,"SchedulerName":"","Tolerations":null,"HostAliases":null}},"VolumeClaimTemplates":null,"ServiceName":""},"Status":{"ObservedGeneration":null,"Replicas":0}},"revision":0}`,
ExpectedEtcdPath: "/registry/controllerrevisions/etcdstoragepathtestnamespace/crs1",
ExpectedGVK: gvkP("apps", "v1", "ControllerRevision"),
},
// --

// k8s.io/kubernetes/pkg/apis/apps/v1beta2
gvr("apps", "v1beta2", "statefulsets"): {
Stub: `{"metadata": {"name": "ss2"}, "spec": {"selector": {"matchLabels": {"a": "b"}}, "template": {"metadata": {"labels": {"a": "b"}}}}}`,
ExpectedEtcdPath: "/registry/statefulsets/etcdstoragepathtestnamespace/ss2",
ExpectedGVK: gvkP("apps", "v1", "StatefulSet"),
},
gvr("apps", "v1beta2", "deployments"): {
Stub: `{"metadata": {"name": "deployment3"}, "spec": {"selector": {"matchLabels": {"f": "z"}}, "template": {"metadata": {"labels": {"f": "z"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container6"}]}}}}`,
ExpectedEtcdPath: "/registry/deployments/etcdstoragepathtestnamespace/deployment3",
ExpectedGVK: gvkP("apps", "v1", "Deployment"),
},
gvr("apps", "v1beta2", "daemonsets"): {
Stub: `{"metadata": {"name": "ds5"}, "spec": {"selector": {"matchLabels": {"a": "b"}}, "template": {"metadata": {"labels": {"a": "b"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container6"}]}}}}`,
ExpectedEtcdPath: "/registry/daemonsets/etcdstoragepathtestnamespace/ds5",
ExpectedGVK: gvkP("apps", "v1", "DaemonSet"),
},
gvr("apps", "v1beta2", "replicasets"): {
Stub: `{"metadata": {"name": "rs2"}, "spec": {"selector": {"matchLabels": {"g": "h"}}, "template": {"metadata": {"labels": {"g": "h"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container4"}]}}}}`,
ExpectedEtcdPath: "/registry/replicasets/etcdstoragepathtestnamespace/rs2",
ExpectedGVK: gvkP("apps", "v1", "ReplicaSet"),
},
gvr("apps", "v1beta2", "controllerrevisions"): {
Stub: `{"metadata":{"name":"crs2"},"data":{"name":"abc","namespace":"default","creationTimestamp":null,"Spec":{"Replicas":0,"Selector":{"matchLabels":{"foo":"bar"}},"Template":{"creationTimestamp":null,"labels":{"foo":"bar"},"Spec":{"Volumes":null,"InitContainers":null,"Containers":null,"RestartPolicy":"Always","TerminationGracePeriodSeconds":null,"ActiveDeadlineSeconds":null,"DNSPolicy":"ClusterFirst","NodeSelector":null,"ServiceAccountName":"","AutomountServiceAccountToken":null,"NodeName":"","SecurityContext":null,"ImagePullSecrets":null,"Hostname":"","Subdomain":"","Affinity":null,"SchedulerName":"","Tolerations":null,"HostAliases":null}},"VolumeClaimTemplates":null,"ServiceName":""},"Status":{"ObservedGeneration":null,"Replicas":0}},"revision":0}`,
ExpectedEtcdPath: "/registry/controllerrevisions/etcdstoragepathtestnamespace/crs2",
ExpectedGVK: gvkP("apps", "v1", "ControllerRevision"),
},
// --

// k8s.io/kubernetes/pkg/apis/apps/v1
gvr("apps", "v1", "daemonsets"): {
Stub: `{"metadata": {"name": "ds6"}, "spec": {"selector": {"matchLabels": {"a": "b"}}, "template": {"metadata": {"labels": {"a": "b"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container6"}]}}}}`,
ExpectedEtcdPath: "/registry/daemonsets/etcdstoragepathtestnamespace/ds6",
},
gvr("apps", "v1", "deployments"): {
Stub: `{"metadata": {"name": "deployment4"}, "spec": {"selector": {"matchLabels": {"f": "z"}}, "template": {"metadata": {"labels": {"f": "z"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container6"}]}}}}`,
ExpectedEtcdPath: "/registry/deployments/etcdstoragepathtestnamespace/deployment4",
},
gvr("apps", "v1", "statefulsets"): {
Stub: `{"metadata": {"name": "ss3"}, "spec": {"selector": {"matchLabels": {"a": "b"}}, "template": {"metadata": {"labels": {"a": "b"}}}}}`,
ExpectedEtcdPath: "/registry/statefulsets/etcdstoragepathtestnamespace/ss3",
},
gvr("apps", "v1", "replicasets"): {
Stub: `{"metadata": {"name": "rs3"}, "spec": {"selector": {"matchLabels": {"g": "h"}}, "template": {"metadata": {"labels": {"g": "h"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container4"}]}}}}`,
ExpectedEtcdPath: "/registry/replicasets/etcdstoragepathtestnamespace/rs3",
},
gvr("apps", "v1", "controllerrevisions"): {
Stub: `{"metadata":{"name":"crs3"},"data":{"name":"abc","namespace":"default","creationTimestamp":null,"Spec":{"Replicas":0,"Selector":{"matchLabels":{"foo":"bar"}},"Template":{"creationTimestamp":null,"labels":{"foo":"bar"},"Spec":{"Volumes":null,"InitContainers":null,"Containers":null,"RestartPolicy":"Always","TerminationGracePeriodSeconds":null,"ActiveDeadlineSeconds":null,"DNSPolicy":"ClusterFirst","NodeSelector":null,"ServiceAccountName":"","AutomountServiceAccountToken":null,"NodeName":"","SecurityContext":null,"ImagePullSecrets":null,"Hostname":"","Subdomain":"","Affinity":null,"SchedulerName":"","Tolerations":null,"HostAliases":null}},"VolumeClaimTemplates":null,"ServiceName":""},"Status":{"ObservedGeneration":null,"Replicas":0}},"revision":0}`,
ExpectedEtcdPath: "/registry/controllerrevisions/etcdstoragepathtestnamespace/crs3",
},
// --

// k8s.io/kubernetes/pkg/apis/autoscaling/v1
gvr("autoscaling", "v1", "horizontalpodautoscalers"): {
Stub: `{"metadata": {"name": "hpa2"}, "spec": {"maxReplicas": 3, "scaleTargetRef": {"kind": "something", "name": "cross"}}}`,
ExpectedEtcdPath: "/registry/horizontalpodautoscalers/etcdstoragepathtestnamespace/hpa2",
},
// --

// k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1
gvr("autoscaling", "v2beta1", "horizontalpodautoscalers"): {
Stub: `{"metadata": {"name": "hpa1"}, "spec": {"maxReplicas": 3, "scaleTargetRef": {"kind": "something", "name": "cross"}}}`,
ExpectedEtcdPath: "/registry/horizontalpodautoscalers/etcdstoragepathtestnamespace/hpa1",
ExpectedGVK: gvkP("autoscaling", "v1", "HorizontalPodAutoscaler"),
},
// --

// k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2
gvr("autoscaling", "v2beta2", "horizontalpodautoscalers"): {
Stub: `{"metadata": {"name": "hpa3"}, "spec": {"maxReplicas": 3, "scaleTargetRef": {"kind": "something", "name": "cross"}}}`,
ExpectedEtcdPath: "/registry/horizontalpodautoscalers/etcdstoragepathtestnamespace/hpa3",
ExpectedGVK: gvkP("autoscaling", "v1", "HorizontalPodAutoscaler"),
},
// --

// k8s.io/kubernetes/pkg/apis/batch/v1
gvr("batch", "v1", "jobs"): {
Stub: `{"metadata": {"name": "job1"}, "spec": {"manualSelector": true, "selector": {"matchLabels": {"controller-uid": "uid1"}}, "template": {"metadata": {"labels": {"controller-uid": "uid1"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container1"}], "dnsPolicy": "ClusterFirst", "restartPolicy": "Never"}}}}`,
ExpectedEtcdPath: "/registry/jobs/etcdstoragepathtestnamespace/job1",
},
// --

// k8s.io/kubernetes/pkg/apis/batch/v1beta1
gvr("batch", "v1beta1", "cronjobs"): {
Stub: `{"metadata": {"name": "cjv1beta1"}, "spec": {"jobTemplate": {"spec": {"template": {"metadata": {"labels": {"controller-uid": "uid0"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container0"}], "dnsPolicy": "ClusterFirst", "restartPolicy": "Never"}}}}, "schedule": "* * * * *"}}`,
ExpectedEtcdPath: "/registry/cronjobs/etcdstoragepathtestnamespace/cjv1beta1",
},
// --

// k8s.io/kubernetes/pkg/apis/batch/v2alpha1
gvr("batch", "v2alpha1", "cronjobs"): {
Stub: `{"metadata": {"name": "cjv2alpha1"}, "spec": {"jobTemplate": {"spec": {"template": {"metadata": {"labels": {"controller-uid": "uid0"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container0"}], "dnsPolicy": "ClusterFirst", "restartPolicy": "Never"}}}}, "schedule": "* * * * *"}}`,
ExpectedEtcdPath: "/registry/cronjobs/etcdstoragepathtestnamespace/cjv2alpha1",
ExpectedGVK: gvkP("batch", "v1beta1", "CronJob"),
},
// --

// k8s.io/kubernetes/pkg/apis/certificates/v1beta1
gvr("certificates.k8s.io", "v1beta1", "certificatesigningrequests"): {
Stub: `{"metadata": {"name": "csr1"}, "spec": {"request": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQnlqQ0NBVE1DQVFBd2dZa3hDekFKQmdOVkJBWVRBbFZUTVJNd0VRWURWUVFJRXdwRFlXeHBabTl5Ym1saApNUll3RkFZRFZRUUhFdzFOYjNWdWRHRnBiaUJXYVdWM01STXdFUVlEVlFRS0V3cEhiMjluYkdVZ1NXNWpNUjh3CkhRWURWUVFMRXhaSmJtWnZjbTFoZEdsdmJpQlVaV05vYm05c2IyZDVNUmN3RlFZRFZRUURFdzUzZDNjdVoyOXYKWjJ4bExtTnZiVENCbnpBTkJna3Foa2lHOXcwQkFRRUZBQU9CalFBd2dZa0NnWUVBcFp0WUpDSEo0VnBWWEhmVgpJbHN0UVRsTzRxQzAzaGpYK1prUHl2ZFlkMVE0K3FiQWVUd1htQ1VLWUhUaFZSZDVhWFNxbFB6eUlCd2llTVpyCldGbFJRZGRaMUl6WEFsVlJEV3dBbzYwS2VjcWVBWG5uVUsrNWZYb1RJL1VnV3NocmU4dEoreC9UTUhhUUtSL0oKY0lXUGhxYVFoc0p1elpidkFkR0E4MEJMeGRNQ0F3RUFBYUFBTUEwR0NTcUdTSWIzRFFFQkJRVUFBNEdCQUlobAo0UHZGcStlN2lwQVJnSTVaTStHWng2bXBDejQ0RFRvMEprd2ZSRGYrQnRyc2FDMHE2OGVUZjJYaFlPc3E0ZmtIClEwdUEwYVZvZzNmNWlKeENhM0hwNWd4YkpRNnpWNmtKMFRFc3VhYU9oRWtvOXNkcENvUE9uUkJtMmkvWFJEMkQKNmlOaDhmOHowU2hHc0ZxakRnRkh5RjNvK2xVeWorVUM2SDFRVzdibgotLS0tLUVORCBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0="}}`,
|
||||
ExpectedEtcdPath: "/registry/certificatesigningrequests/csr1",
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/coordination/v1beta1
|
||||
gvr("coordination.k8s.io", "v1beta1", "leases"): {
|
||||
Stub: `{"metadata": {"name": "lease1"}, "spec": {"holderIdentity": "holder", "leaseDurationSeconds": 5}}`,
|
||||
ExpectedEtcdPath: "/registry/leases/etcdstoragepathtestnamespace/lease1",
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/events/v1beta1
|
||||
gvr("events.k8s.io", "v1beta1", "events"): {
|
||||
Stub: `{"metadata": {"name": "event2"}, "regarding": {"namespace": "etcdstoragepathtestnamespace"}, "note": "some data here", "eventTime": "2017-08-09T15:04:05.000000Z", "reportingInstance": "node-xyz", "reportingController": "k8s.io/my-controller", "action": "DidNothing", "reason": "Laziness"}`,
|
||||
ExpectedEtcdPath: "/registry/events/etcdstoragepathtestnamespace/event2",
|
||||
ExpectedGVK: gvkP("", "v1", "Event"),
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/extensions/v1beta1
|
||||
gvr("extensions", "v1beta1", "daemonsets"): {
|
||||
Stub: `{"metadata": {"name": "ds1"}, "spec": {"selector": {"matchLabels": {"u": "t"}}, "template": {"metadata": {"labels": {"u": "t"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container5"}]}}}}`,
|
||||
ExpectedEtcdPath: "/registry/daemonsets/etcdstoragepathtestnamespace/ds1",
|
||||
ExpectedGVK: gvkP("apps", "v1", "DaemonSet"),
|
||||
},
|
||||
gvr("extensions", "v1beta1", "podsecuritypolicies"): {
|
||||
Stub: `{"metadata": {"name": "psp1"}, "spec": {"fsGroup": {"rule": "RunAsAny"}, "privileged": true, "runAsUser": {"rule": "RunAsAny"}, "seLinux": {"rule": "MustRunAs"}, "supplementalGroups": {"rule": "RunAsAny"}}}`,
|
||||
ExpectedEtcdPath: "/registry/podsecuritypolicy/psp1",
|
||||
ExpectedGVK: gvkP("policy", "v1beta1", "PodSecurityPolicy"),
|
||||
},
|
||||
gvr("extensions", "v1beta1", "ingresses"): {
|
||||
Stub: `{"metadata": {"name": "ingress1"}, "spec": {"backend": {"serviceName": "service", "servicePort": 5000}}}`,
|
||||
ExpectedEtcdPath: "/registry/ingress/etcdstoragepathtestnamespace/ingress1",
|
||||
},
|
||||
gvr("extensions", "v1beta1", "networkpolicies"): {
|
||||
Stub: `{"metadata": {"name": "np1"}, "spec": {"podSelector": {"matchLabels": {"e": "f"}}}}`,
|
||||
ExpectedEtcdPath: "/registry/networkpolicies/etcdstoragepathtestnamespace/np1",
|
||||
ExpectedGVK: gvkP("networking.k8s.io", "v1", "NetworkPolicy"),
|
||||
},
|
||||
gvr("extensions", "v1beta1", "deployments"): {
|
||||
Stub: `{"metadata": {"name": "deployment1"}, "spec": {"selector": {"matchLabels": {"f": "z"}}, "template": {"metadata": {"labels": {"f": "z"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container6"}]}}}}`,
|
||||
ExpectedEtcdPath: "/registry/deployments/etcdstoragepathtestnamespace/deployment1",
|
||||
ExpectedGVK: gvkP("apps", "v1", "Deployment"),
|
||||
},
|
||||
gvr("extensions", "v1beta1", "replicasets"): {
|
||||
Stub: `{"metadata": {"name": "rs1"}, "spec": {"selector": {"matchLabels": {"g": "h"}}, "template": {"metadata": {"labels": {"g": "h"}}, "spec": {"containers": [{"image": "fedora:latest", "name": "container4"}]}}}}`,
|
||||
ExpectedEtcdPath: "/registry/replicasets/etcdstoragepathtestnamespace/rs1",
|
||||
ExpectedGVK: gvkP("apps", "v1", "ReplicaSet"),
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/networking/v1
|
||||
gvr("networking.k8s.io", "v1", "networkpolicies"): {
|
||||
Stub: `{"metadata": {"name": "np2"}, "spec": {"podSelector": {"matchLabels": {"e": "f"}}}}`,
|
||||
ExpectedEtcdPath: "/registry/networkpolicies/etcdstoragepathtestnamespace/np2",
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/policy/v1beta1
|
||||
gvr("policy", "v1beta1", "poddisruptionbudgets"): {
|
||||
Stub: `{"metadata": {"name": "pdb1"}, "spec": {"selector": {"matchLabels": {"anokkey": "anokvalue"}}}}`,
|
||||
ExpectedEtcdPath: "/registry/poddisruptionbudgets/etcdstoragepathtestnamespace/pdb1",
|
||||
},
|
||||
gvr("policy", "v1beta1", "podsecuritypolicies"): {
|
||||
Stub: `{"metadata": {"name": "psp2"}, "spec": {"fsGroup": {"rule": "RunAsAny"}, "privileged": true, "runAsUser": {"rule": "RunAsAny"}, "seLinux": {"rule": "MustRunAs"}, "supplementalGroups": {"rule": "RunAsAny"}}}`,
|
||||
ExpectedEtcdPath: "/registry/podsecuritypolicy/psp2",
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/storage/v1alpha1
|
||||
gvr("storage.k8s.io", "v1alpha1", "volumeattachments"): {
|
||||
Stub: `{"metadata": {"name": "va1"}, "spec": {"attacher": "gce", "nodeName": "localhost", "source": {"persistentVolumeName": "pv1"}}}`,
|
||||
ExpectedEtcdPath: "/registry/volumeattachments/va1",
|
||||
ExpectedGVK: gvkP("storage.k8s.io", "v1beta1", "VolumeAttachment"),
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/storage/v1beta1
|
||||
gvr("storage.k8s.io", "v1beta1", "volumeattachments"): {
|
||||
Stub: `{"metadata": {"name": "va2"}, "spec": {"attacher": "gce", "nodeName": "localhost", "source": {"persistentVolumeName": "pv2"}}}`,
|
||||
ExpectedEtcdPath: "/registry/volumeattachments/va2",
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/storage/v1
|
||||
gvr("storage.k8s.io", "v1", "volumeattachments"): {
|
||||
Stub: `{"metadata": {"name": "va3"}, "spec": {"attacher": "gce", "nodeName": "localhost", "source": {"persistentVolumeName": "pv3"}}}`,
|
||||
ExpectedEtcdPath: "/registry/volumeattachments/va3",
|
||||
ExpectedGVK: gvkP("storage.k8s.io", "v1beta1", "VolumeAttachment"),
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/storage/v1beta1
|
||||
gvr("storage.k8s.io", "v1beta1", "storageclasses"): {
|
||||
Stub: `{"metadata": {"name": "sc1"}, "provisioner": "aws"}`,
|
||||
ExpectedEtcdPath: "/registry/storageclasses/sc1",
|
||||
ExpectedGVK: gvkP("storage.k8s.io", "v1", "StorageClass"),
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/storage/v1
|
||||
gvr("storage.k8s.io", "v1", "storageclasses"): {
|
||||
Stub: `{"metadata": {"name": "sc2"}, "provisioner": "aws"}`,
|
||||
ExpectedEtcdPath: "/registry/storageclasses/sc2",
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/settings/v1alpha1
|
||||
gvr("settings.k8s.io", "v1alpha1", "podpresets"): {
|
||||
Stub: `{"metadata": {"name": "podpre1"}, "spec": {"env": [{"name": "FOO"}]}}`,
|
||||
ExpectedEtcdPath: "/registry/podpresets/etcdstoragepathtestnamespace/podpre1",
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/rbac/v1alpha1
|
||||
gvr("rbac.authorization.k8s.io", "v1alpha1", "roles"): {
|
||||
Stub: `{"metadata": {"name": "role1"}, "rules": [{"apiGroups": ["v1"], "resources": ["events"], "verbs": ["watch"]}]}`,
|
||||
ExpectedEtcdPath: "/registry/roles/etcdstoragepathtestnamespace/role1",
|
||||
ExpectedGVK: gvkP("rbac.authorization.k8s.io", "v1", "Role"),
|
||||
},
|
||||
gvr("rbac.authorization.k8s.io", "v1alpha1", "clusterroles"): {
|
||||
Stub: `{"metadata": {"name": "crole1"}, "rules": [{"nonResourceURLs": ["/version"], "verbs": ["get"]}]}`,
|
||||
ExpectedEtcdPath: "/registry/clusterroles/crole1",
|
||||
ExpectedGVK: gvkP("rbac.authorization.k8s.io", "v1", "ClusterRole"),
|
||||
},
|
||||
gvr("rbac.authorization.k8s.io", "v1alpha1", "rolebindings"): {
|
||||
Stub: `{"metadata": {"name": "roleb1"}, "roleRef": {"apiGroup": "rbac.authorization.k8s.io", "kind": "ClusterRole", "name": "somecr"}, "subjects": [{"apiVersion": "rbac.authorization.k8s.io/v1alpha1", "kind": "Group", "name": "system:authenticated"}]}`,
|
||||
ExpectedEtcdPath: "/registry/rolebindings/etcdstoragepathtestnamespace/roleb1",
|
||||
ExpectedGVK: gvkP("rbac.authorization.k8s.io", "v1", "RoleBinding"),
|
||||
},
|
||||
gvr("rbac.authorization.k8s.io", "v1alpha1", "clusterrolebindings"): {
|
||||
Stub: `{"metadata": {"name": "croleb1"}, "roleRef": {"apiGroup": "rbac.authorization.k8s.io", "kind": "ClusterRole", "name": "somecr"}, "subjects": [{"apiVersion": "rbac.authorization.k8s.io/v1alpha1", "kind": "Group", "name": "system:authenticated"}]}`,
|
||||
ExpectedEtcdPath: "/registry/clusterrolebindings/croleb1",
|
||||
ExpectedGVK: gvkP("rbac.authorization.k8s.io", "v1", "ClusterRoleBinding"),
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/rbac/v1beta1
|
||||
gvr("rbac.authorization.k8s.io", "v1beta1", "roles"): {
|
||||
Stub: `{"metadata": {"name": "role2"}, "rules": [{"apiGroups": ["v1"], "resources": ["events"], "verbs": ["watch"]}]}`,
|
||||
ExpectedEtcdPath: "/registry/roles/etcdstoragepathtestnamespace/role2",
|
||||
ExpectedGVK: gvkP("rbac.authorization.k8s.io", "v1", "Role"),
|
||||
},
|
||||
gvr("rbac.authorization.k8s.io", "v1beta1", "clusterroles"): {
|
||||
Stub: `{"metadata": {"name": "crole2"}, "rules": [{"nonResourceURLs": ["/version"], "verbs": ["get"]}]}`,
|
||||
ExpectedEtcdPath: "/registry/clusterroles/crole2",
|
||||
ExpectedGVK: gvkP("rbac.authorization.k8s.io", "v1", "ClusterRole"),
|
||||
},
|
||||
gvr("rbac.authorization.k8s.io", "v1beta1", "rolebindings"): {
|
||||
Stub: `{"metadata": {"name": "roleb2"}, "roleRef": {"apiGroup": "rbac.authorization.k8s.io", "kind": "ClusterRole", "name": "somecr"}, "subjects": [{"apiVersion": "rbac.authorization.k8s.io/v1alpha1", "kind": "Group", "name": "system:authenticated"}]}`,
|
||||
ExpectedEtcdPath: "/registry/rolebindings/etcdstoragepathtestnamespace/roleb2",
|
||||
ExpectedGVK: gvkP("rbac.authorization.k8s.io", "v1", "RoleBinding"),
|
||||
},
|
||||
gvr("rbac.authorization.k8s.io", "v1beta1", "clusterrolebindings"): {
|
||||
Stub: `{"metadata": {"name": "croleb2"}, "roleRef": {"apiGroup": "rbac.authorization.k8s.io", "kind": "ClusterRole", "name": "somecr"}, "subjects": [{"apiVersion": "rbac.authorization.k8s.io/v1alpha1", "kind": "Group", "name": "system:authenticated"}]}`,
|
||||
ExpectedEtcdPath: "/registry/clusterrolebindings/croleb2",
|
||||
ExpectedGVK: gvkP("rbac.authorization.k8s.io", "v1", "ClusterRoleBinding"),
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/rbac/v1
|
||||
gvr("rbac.authorization.k8s.io", "v1", "roles"): {
|
||||
Stub: `{"metadata": {"name": "role3"}, "rules": [{"apiGroups": ["v1"], "resources": ["events"], "verbs": ["watch"]}]}`,
|
||||
ExpectedEtcdPath: "/registry/roles/etcdstoragepathtestnamespace/role3",
|
||||
},
|
||||
gvr("rbac.authorization.k8s.io", "v1", "clusterroles"): {
|
||||
Stub: `{"metadata": {"name": "crole3"}, "rules": [{"nonResourceURLs": ["/version"], "verbs": ["get"]}]}`,
|
||||
ExpectedEtcdPath: "/registry/clusterroles/crole3",
|
||||
},
|
||||
gvr("rbac.authorization.k8s.io", "v1", "rolebindings"): {
|
||||
Stub: `{"metadata": {"name": "roleb3"}, "roleRef": {"apiGroup": "rbac.authorization.k8s.io", "kind": "ClusterRole", "name": "somecr"}, "subjects": [{"apiVersion": "rbac.authorization.k8s.io/v1alpha1", "kind": "Group", "name": "system:authenticated"}]}`,
|
||||
ExpectedEtcdPath: "/registry/rolebindings/etcdstoragepathtestnamespace/roleb3",
|
||||
},
|
||||
gvr("rbac.authorization.k8s.io", "v1", "clusterrolebindings"): {
|
||||
Stub: `{"metadata": {"name": "croleb3"}, "roleRef": {"apiGroup": "rbac.authorization.k8s.io", "kind": "ClusterRole", "name": "somecr"}, "subjects": [{"apiVersion": "rbac.authorization.k8s.io/v1alpha1", "kind": "Group", "name": "system:authenticated"}]}`,
|
||||
ExpectedEtcdPath: "/registry/clusterrolebindings/croleb3",
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1
|
||||
gvr("admissionregistration.k8s.io", "v1alpha1", "initializerconfigurations"): {
|
||||
Stub: `{"metadata":{"name":"ic1"},"initializers":[{"name":"initializer.k8s.io","rules":[{"apiGroups":["group"],"apiVersions":["version"],"resources":["resource"]}],"failurePolicy":"Ignore"}]}`,
|
||||
ExpectedEtcdPath: "/registry/initializerconfigurations/ic1",
|
||||
},
|
||||
// k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1
|
||||
gvr("admissionregistration.k8s.io", "v1beta1", "validatingwebhookconfigurations"): {
|
||||
Stub: `{"metadata":{"name":"hook1","creationTimestamp":null},"webhooks":[{"name":"externaladmissionhook.k8s.io","clientConfig":{"service":{"namespace":"ns","name":"n"},"caBundle":null},"rules":[{"operations":["CREATE"],"apiGroups":["group"],"apiVersions":["version"],"resources":["resource"]}],"failurePolicy":"Ignore"}]}`,
|
||||
ExpectedEtcdPath: "/registry/validatingwebhookconfigurations/hook1",
|
||||
},
|
||||
gvr("admissionregistration.k8s.io", "v1beta1", "mutatingwebhookconfigurations"): {
|
||||
Stub: `{"metadata":{"name":"hook1","creationTimestamp":null},"webhooks":[{"name":"externaladmissionhook.k8s.io","clientConfig":{"service":{"namespace":"ns","name":"n"},"caBundle":null},"rules":[{"operations":["CREATE"],"apiGroups":["group"],"apiVersions":["version"],"resources":["resource"]}],"failurePolicy":"Ignore"}]}`,
|
||||
ExpectedEtcdPath: "/registry/mutatingwebhookconfigurations/hook1",
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1
|
||||
gvr("scheduling.k8s.io", "v1alpha1", "priorityclasses"): {
|
||||
Stub: `{"metadata":{"name":"pc1"},"Value":1000}`,
|
||||
ExpectedEtcdPath: "/registry/priorityclasses/pc1",
|
||||
ExpectedGVK: gvkP("scheduling.k8s.io", "v1beta1", "PriorityClass"),
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/scheduling/v1beta1
|
||||
gvr("scheduling.k8s.io", "v1beta1", "priorityclasses"): {
|
||||
Stub: `{"metadata":{"name":"pc2"},"Value":1000}`,
|
||||
ExpectedEtcdPath: "/registry/priorityclasses/pc2",
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1
|
||||
// depends on aggregator using the same ungrouped RESTOptionsGetter as the kube apiserver, not SimpleRestOptionsFactory in aggregator.go
|
||||
gvr("apiregistration.k8s.io", "v1beta1", "apiservices"): {
|
||||
Stub: `{"metadata": {"name": "as1.foo.com"}, "spec": {"group": "foo.com", "version": "as1", "groupPriorityMinimum":100, "versionPriority":10}}`,
|
||||
ExpectedEtcdPath: "/registry/apiregistration.k8s.io/apiservices/as1.foo.com",
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
|
||||
// depends on aggregator using the same ungrouped RESTOptionsGetter as the kube apiserver, not SimpleRestOptionsFactory in aggregator.go
|
||||
gvr("apiregistration.k8s.io", "v1", "apiservices"): {
|
||||
Stub: `{"metadata": {"name": "as2.foo.com"}, "spec": {"group": "foo.com", "version": "as2", "groupPriorityMinimum":100, "versionPriority":10}}`,
|
||||
ExpectedEtcdPath: "/registry/apiregistration.k8s.io/apiservices/as2.foo.com",
|
||||
ExpectedGVK: gvkP("apiregistration.k8s.io", "v1beta1", "APIService"),
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1
|
||||
gvr("apiextensions.k8s.io", "v1beta1", "customresourcedefinitions"): {
|
||||
Stub: `{"metadata": {"name": "openshiftwebconsoleconfigs.webconsole.operator.openshift.io"},"spec": {"scope": "Cluster","group": "webconsole.operator.openshift.io","version": "v1alpha1","names": {"kind": "OpenShiftWebConsoleConfig","plural": "openshiftwebconsoleconfigs","singular": "openshiftwebconsoleconfig"}}}`,
|
||||
ExpectedEtcdPath: "/registry/apiextensions.k8s.io/customresourcedefinitions/openshiftwebconsoleconfigs.webconsole.operator.openshift.io",
|
||||
},
|
||||
gvr("cr.bar.com", "v1", "foos"): {
|
||||
Stub: `{"kind": "Foo", "apiVersion": "cr.bar.com/v1", "metadata": {"name": "cr1foo"}, "color": "blue"}`, // requires TypeMeta due to CRD scheme's UnstructuredObjectTyper
|
||||
ExpectedEtcdPath: "/registry/cr.bar.com/foos/etcdstoragepathtestnamespace/cr1foo",
|
||||
},
|
||||
gvr("custom.fancy.com", "v2", "pants"): {
|
||||
Stub: `{"kind": "Pant", "apiVersion": "custom.fancy.com/v2", "metadata": {"name": "cr2pant"}, "isFancy": true}`, // requires TypeMeta due to CRD scheme's UnstructuredObjectTyper
|
||||
ExpectedEtcdPath: "/registry/custom.fancy.com/pants/cr2pant",
|
||||
},
|
||||
gvr("awesome.bears.com", "v1", "pandas"): {
|
||||
Stub: `{"kind": "Panda", "apiVersion": "awesome.bears.com/v1", "metadata": {"name": "cr3panda"}, "weight": 100}`, // requires TypeMeta due to CRD scheme's UnstructuredObjectTyper
|
||||
ExpectedEtcdPath: "/registry/awesome.bears.com/pandas/cr3panda",
|
||||
},
|
||||
gvr("awesome.bears.com", "v3", "pandas"): {
|
||||
Stub: `{"kind": "Panda", "apiVersion": "awesome.bears.com/v3", "metadata": {"name": "cr4panda"}, "weight": 300}`, // requires TypeMeta due to CRD scheme's UnstructuredObjectTyper
|
||||
ExpectedEtcdPath: "/registry/awesome.bears.com/pandas/cr4panda",
|
||||
ExpectedGVK: gvkP("awesome.bears.com", "v1", "Panda"),
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1
|
||||
gvr("auditregistration.k8s.io", "v1alpha1", "auditsinks"): {
|
||||
Stub: `{"metadata":{"name":"sink1"},"spec":{"policy":{"level":"Metadata","stages":["ResponseStarted"]},"webhook":{"clientConfig":{"url":"http://localhost:4444","service":null,"caBundle":null}}}}`,
|
||||
ExpectedEtcdPath: "/registry/auditsinks/sink1",
|
||||
},
|
||||
// --
|
||||
}
|
||||
}
|
||||
|
||||
// StorageData contains information required to create an object and verify its storage in etcd
|
||||
// It must be paired with a specific resource
|
||||
type StorageData struct {
|
||||
Stub string // Valid JSON stub to use during create
|
||||
Prerequisites []Prerequisite // Optional, ordered list of JSON objects to create before stub
|
||||
ExpectedEtcdPath string // Expected location of object in etcd, do not use any variables, constants, etc to derive this value - always supply the full raw string
|
||||
ExpectedGVK *schema.GroupVersionKind // The GVK that we expect this object to be stored as - leave this nil to use the default
|
||||
}
|
||||
|
||||
// Prerequisite contains information required to create a resource (but not verify it)
|
||||
type Prerequisite struct {
|
||||
GvrData schema.GroupVersionResource
|
||||
Stub string
|
||||
}
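
// As an illustrative sketch (not part of the upstream data set), an entry that
// needs another object created first would combine StorageData and Prerequisite
// like this; the group, resource names, stubs, and etcd path below are hypothetical:
//
//	gvr("example.k8s.io", "v1", "widgets"): {
//		Stub:             `{"metadata": {"name": "w1"}, "spec": {"gadgetName": "g1"}}`,
//		Prerequisites:    []Prerequisite{{GvrData: gvr("example.k8s.io", "v1", "gadgets"), Stub: `{"metadata": {"name": "g1"}}`}},
//		ExpectedEtcdPath: "/registry/widgets/etcdstoragepathtestnamespace/w1",
//	},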

// GetCustomResourceDefinitionData returns the resource definitions that back the custom resources
// included in GetEtcdStorageData. They should be created using CreateTestCRDs before running any tests.
func GetCustomResourceDefinitionData() []*apiextensionsv1beta1.CustomResourceDefinition {
    return []*apiextensionsv1beta1.CustomResourceDefinition{
        // namespaced with legacy version field
        {
            ObjectMeta: metav1.ObjectMeta{
                Name: "foos.cr.bar.com",
            },
            Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
                Group:   "cr.bar.com",
                Version: "v1",
                Scope:   apiextensionsv1beta1.NamespaceScoped,
                Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
                    Plural: "foos",
                    Kind:   "Foo",
                },
            },
        },
        // cluster scoped with legacy version field
        {
            ObjectMeta: metav1.ObjectMeta{
                Name: "pants.custom.fancy.com",
            },
            Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
                Group:   "custom.fancy.com",
                Version: "v2",
                Scope:   apiextensionsv1beta1.ClusterScoped,
                Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
                    Plural: "pants",
                    Kind:   "Pant",
                },
            },
        },
        // cluster scoped with versions field
        {
            ObjectMeta: metav1.ObjectMeta{
                Name: "pandas.awesome.bears.com",
            },
            Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
                Group: "awesome.bears.com",
                Versions: []apiextensionsv1beta1.CustomResourceDefinitionVersion{
                    {
                        Name:    "v1",
                        Served:  true,
                        Storage: true,
                    },
                    {
                        Name:    "v2",
                        Served:  false,
                        Storage: false,
                    },
                    {
                        Name:    "v3",
                        Served:  true,
                        Storage: false,
                    },
                },
                Scope: apiextensionsv1beta1.ClusterScoped,
                Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
                    Plural: "pandas",
                    Kind:   "Panda",
                },
            },
        },
    }
}

func gvr(g, v, r string) schema.GroupVersionResource {
    return schema.GroupVersionResource{Group: g, Version: v, Resource: r}
}

func gvkP(g, v, k string) *schema.GroupVersionKind {
    return &schema.GroupVersionKind{Group: g, Version: v, Kind: k}
}
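
// For reference, gvr and gvkP are just shorthand constructors. An entry keyed by
// gvr("batch", "v2alpha1", "cronjobs") with ExpectedGVK gvkP("batch", "v1beta1", "CronJob")
// asserts that objects created via the v2alpha1 API are persisted to etcd as v1beta1.
// A minimal sketch of the equivalent literal values (using only the schema package):
//
//	_ = schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"}
//	_ = &schema.GroupVersionKind{Group: "batch", Version: "v1beta1", Kind: "CronJob"}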
954
vendor/k8s.io/kubernetes/test/integration/etcd/etcd_storage_path_test.go
generated
vendored
File diff suppressed because it is too large
367
vendor/k8s.io/kubernetes/test/integration/etcd/server.go
generated
vendored
Normal file
@ -0,0 +1,367 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package etcd

import (
    "context"
    "encoding/json"
    "io/ioutil"
    "net"
    "net/http"
    "os"
    "strings"
    "testing"
    "time"

    "github.com/coreos/etcd/clientv3"
    "github.com/coreos/etcd/clientv3/concurrency"

    apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
    apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/util/wait"
    genericapiserveroptions "k8s.io/apiserver/pkg/server/options"
    cacheddiscovery "k8s.io/client-go/discovery/cached"
    "k8s.io/client-go/dynamic"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/restmapper"
    "k8s.io/kubernetes/cmd/kube-apiserver/app"
    "k8s.io/kubernetes/cmd/kube-apiserver/app/options"
    "k8s.io/kubernetes/test/integration"
    "k8s.io/kubernetes/test/integration/framework"

    // install all APIs
    _ "k8s.io/kubernetes/pkg/master"
)

// StartRealMasterOrDie starts an API master that is appropriate for use in tests that require one of every resource
func StartRealMasterOrDie(t *testing.T) *Master {
    certDir, err := ioutil.TempDir("", t.Name())
    if err != nil {
        t.Fatal(err)
    }

    _, defaultServiceClusterIPRange, err := net.ParseCIDR("10.0.0.0/24")
    if err != nil {
        t.Fatal(err)
    }

    listener, _, err := genericapiserveroptions.CreateListener("tcp", "127.0.0.1:0")
    if err != nil {
        t.Fatal(err)
    }

    kubeAPIServerOptions := options.NewServerRunOptions()
    kubeAPIServerOptions.InsecureServing.BindPort = 0
    kubeAPIServerOptions.SecureServing.Listener = listener
    kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir
    kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()}
    kubeAPIServerOptions.Etcd.DefaultStorageMediaType = runtime.ContentTypeJSON // force json so we can easily interpret the result in etcd
    kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange
    kubeAPIServerOptions.Authorization.Modes = []string{"RBAC"}
    kubeAPIServerOptions.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
    completedOptions, err := app.Complete(kubeAPIServerOptions)
    if err != nil {
        t.Fatal(err)
    }
    if err := completedOptions.APIEnablement.RuntimeConfig.Set("api/all=true"); err != nil {
        t.Fatal(err)
    }

    // get etcd client before starting API server
    rawClient, kvClient, err := integration.GetEtcdClients(completedOptions.Etcd.StorageConfig)
    if err != nil {
        t.Fatal(err)
    }

    // get a leased session
    session, err := concurrency.NewSession(rawClient)
    if err != nil {
        t.Fatal(err)
    }

    // then build and use an etcd lock
    // this prevents more than one of these masters from running at the same time
    lock := concurrency.NewLocker(session, "kube_integration_etcd_raw")
    lock.Lock()

    // make sure we start with a clean slate
    if _, err := kvClient.Delete(context.Background(), "/registry/", clientv3.WithPrefix()); err != nil {
        t.Fatal(err)
    }

    stopCh := make(chan struct{})

    kubeAPIServer, err := app.CreateServerChain(completedOptions, stopCh)
    if err != nil {
        t.Fatal(err)
    }

    kubeClientConfig := restclient.CopyConfig(kubeAPIServer.LoopbackClientConfig)

    // we make lots of requests, don't be slow
    kubeClientConfig.QPS = 99999
    kubeClientConfig.Burst = 9999

    kubeClient := clientset.NewForConfigOrDie(kubeClientConfig)

    go func() {
        // catch panics that occur in this goroutine so we get a comprehensible failure
        defer func() {
            if err := recover(); err != nil {
                t.Errorf("Unexpected panic trying to start API master: %#v", err)
            }
        }()

        if err := kubeAPIServer.PrepareRun().Run(stopCh); err != nil {
            t.Fatal(err)
        }
    }()

    lastHealth := ""
    if err := wait.PollImmediate(time.Second, time.Minute, func() (done bool, err error) {
        // wait for the server to be healthy
        result := kubeClient.RESTClient().Get().AbsPath("/healthz").Do()
        content, _ := result.Raw()
        lastHealth = string(content)
        if errResult := result.Error(); errResult != nil {
            t.Log(errResult)
            return false, nil
        }
        var status int
        result.StatusCode(&status)
        return status == http.StatusOK, nil
    }); err != nil {
        t.Log(lastHealth)
        t.Fatal(err)
    }

    // create CRDs so we can make sure that custom resources do not get lost
    CreateTestCRDs(t, apiextensionsclientset.NewForConfigOrDie(kubeClientConfig), false, GetCustomResourceDefinitionData()...)

    // force cached discovery reset
    discoveryClient := cacheddiscovery.NewMemCacheClient(kubeClient.Discovery())
    restMapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)
    restMapper.Reset()

    serverResources, err := kubeClient.Discovery().ServerResources()
    if err != nil {
        t.Fatal(err)
    }

    cleanup := func() {
        if err := os.RemoveAll(certDir); err != nil {
            t.Log(err)
        }
        close(stopCh)
        lock.Unlock()
        if err := session.Close(); err != nil {
            t.Log(err)
        }
    }

    return &Master{
        Client:    kubeClient,
        Dynamic:   dynamic.NewForConfigOrDie(kubeClientConfig),
        Config:    kubeClientConfig,
        KV:        kvClient,
        Mapper:    restMapper,
        Resources: GetResources(t, serverResources),
        Cleanup:   cleanup,
    }
}

// Master represents a running API server that is ready for use
// The Cleanup func must be deferred to prevent resource leaks
type Master struct {
    Client    clientset.Interface
    Dynamic   dynamic.Interface
    Config    *restclient.Config
    KV        clientv3.KV
    Mapper    meta.RESTMapper
    Resources []Resource
    Cleanup   func()
}
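
// A minimal usage sketch (the test name below is hypothetical, but the fields
// used are the ones defined above):
//
//	func TestExample(t *testing.T) {
//		master := StartRealMasterOrDie(t)
//		defer master.Cleanup()
//		if _, err := master.Client.CoreV1().Namespaces().Get("default", metav1.GetOptions{}); err != nil {
//			t.Fatal(err)
//		}
//	}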

// Resource contains REST mapping information for a specific resource and extra metadata such as delete collection support
type Resource struct {
    Mapping             *meta.RESTMapping
    HasDeleteCollection bool
}

// GetResources fetches the Resources associated with serverResources that support get and create
func GetResources(t *testing.T, serverResources []*metav1.APIResourceList) []Resource {
    var resources []Resource

    for _, discoveryGroup := range serverResources {
        for _, discoveryResource := range discoveryGroup.APIResources {
            // this is a subresource, skip it
            if strings.Contains(discoveryResource.Name, "/") {
                continue
            }
            hasCreate := false
            hasGet := false
            hasDeleteCollection := false
            for _, verb := range discoveryResource.Verbs {
                if verb == "get" {
                    hasGet = true
                }
                if verb == "create" {
                    hasCreate = true
                }
                if verb == "deletecollection" {
                    hasDeleteCollection = true
                }
            }
            if !(hasCreate && hasGet) {
                continue
            }

            resourceGV, err := schema.ParseGroupVersion(discoveryGroup.GroupVersion)
            if err != nil {
                t.Fatal(err)
            }
            gvk := resourceGV.WithKind(discoveryResource.Kind)
            if len(discoveryResource.Group) > 0 || len(discoveryResource.Version) > 0 {
                gvk = schema.GroupVersionKind{
                    Group:   discoveryResource.Group,
                    Version: discoveryResource.Version,
                    Kind:    discoveryResource.Kind,
                }
            }
            gvr := resourceGV.WithResource(discoveryResource.Name)

            resources = append(resources, Resource{
                Mapping: &meta.RESTMapping{
                    Resource:         gvr,
                    GroupVersionKind: gvk,
                    Scope:            scope(discoveryResource.Namespaced),
                },
                HasDeleteCollection: hasDeleteCollection,
            })
        }
    }

    return resources
}
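
// For example, a discovery entry like {Name: "pods", Verbs: ["create", "get", "list"]}
// passes the filter above, while an entry named "pods/status" is skipped as a
// subresource and one lacking either the "create" or "get" verb is dropped.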

func scope(namespaced bool) meta.RESTScope {
    if namespaced {
        return meta.RESTScopeNamespace
    }
    return meta.RESTScopeRoot
}

// JSONToUnstructured converts a JSON stub to unstructured.Unstructured and
// returns a dynamic resource client that can be used to interact with it
func JSONToUnstructured(stub, namespace string, mapping *meta.RESTMapping, dynamicClient dynamic.Interface) (dynamic.ResourceInterface, *unstructured.Unstructured, error) {
    typeMetaAdder := map[string]interface{}{}
    if err := json.Unmarshal([]byte(stub), &typeMetaAdder); err != nil {
        return nil, nil, err
    }

    // we don't require GVK on the data we provide, so we fill it in here. We could require it, but that seems extraneous.
    typeMetaAdder["apiVersion"] = mapping.GroupVersionKind.GroupVersion().String()
    typeMetaAdder["kind"] = mapping.GroupVersionKind.Kind

    if mapping.Scope == meta.RESTScopeRoot {
        namespace = ""
    }

    return dynamicClient.Resource(mapping.Resource).Namespace(namespace), &unstructured.Unstructured{Object: typeMetaAdder}, nil
}
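
// A minimal usage sketch, assuming a Master m and one of its Resources r; the
// namespace value is illustrative:
//
//	client, obj, err := JSONToUnstructured(stub, "etcdstoragepathtestnamespace", r.Mapping, m.Dynamic)
//	if err != nil {
//		t.Fatal(err)
//	}
//	if _, err := client.Create(obj, metav1.CreateOptions{}); err != nil {
//		t.Fatal(err)
//	}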

// CreateTestCRDs creates the given CRDs, any failure causes the test to Fatal.
// If skipCrdExistsInDiscovery is true, the CRDs are only checked for the Established condition via their Status.
// If skipCrdExistsInDiscovery is false, the CRDs are checked via discovery, see CrdExistsInDiscovery.
func CreateTestCRDs(t *testing.T, client apiextensionsclientset.Interface, skipCrdExistsInDiscovery bool, crds ...*apiextensionsv1beta1.CustomResourceDefinition) {
    for _, crd := range crds {
        createTestCRD(t, client, skipCrdExistsInDiscovery, crd)
    }
}

func createTestCRD(t *testing.T, client apiextensionsclientset.Interface, skipCrdExistsInDiscovery bool, crd *apiextensionsv1beta1.CustomResourceDefinition) {
    if _, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd); err != nil {
        t.Fatalf("Failed to create %s CRD; %v", crd.Name, err)
    }
    if skipCrdExistsInDiscovery {
        if err := waitForEstablishedCRD(client, crd.Name); err != nil {
            t.Fatalf("Failed to establish %s CRD; %v", crd.Name, err)
        }
        return
    }
    if err := wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
        return CrdExistsInDiscovery(client, crd), nil
    }); err != nil {
        t.Fatalf("Failed to see %s in discovery: %v", crd.Name, err)
    }
}

func waitForEstablishedCRD(client apiextensionsclientset.Interface, name string) error {
    return wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
        crd, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        for _, cond := range crd.Status.Conditions {
            switch cond.Type {
            case apiextensionsv1beta1.Established:
                if cond.Status == apiextensionsv1beta1.ConditionTrue {
                    return true, nil
                }
            }
        }
        return false, nil
    })
}

// CrdExistsInDiscovery checks to see if the given CRD exists in discovery at all served versions.
func CrdExistsInDiscovery(client apiextensionsclientset.Interface, crd *apiextensionsv1beta1.CustomResourceDefinition) bool {
    var versions []string
    if len(crd.Spec.Version) != 0 {
        versions = append(versions, crd.Spec.Version)
    }
    for _, v := range crd.Spec.Versions {
        if v.Served {
            versions = append(versions, v.Name)
        }
    }
    for _, v := range versions {
        if !crdVersionExistsInDiscovery(client, crd, v) {
            return false
        }
    }
    return true
}

func crdVersionExistsInDiscovery(client apiextensionsclientset.Interface, crd *apiextensionsv1beta1.CustomResourceDefinition, version string) bool {
    resourceList, err := client.Discovery().ServerResourcesForGroupVersion(crd.Spec.Group + "/" + version)
    if err != nil {
        return false
    }
    for _, resource := range resourceList.APIResources {
        if resource.Name == crd.Spec.Names.Plural {
            return true
        }
    }
    return false
}
22
vendor/k8s.io/kubernetes/test/integration/evictions/BUILD
generated
vendored
@ -15,18 +15,18 @@ go_test(
    tags = ["integration"],
    deps = [
        "//pkg/controller/disruption:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
    ],
)

89
vendor/k8s.io/kubernetes/test/integration/evictions/evictions_test.go
generated
vendored
@ -37,6 +37,7 @@ import (
    "k8s.io/client-go/tools/cache"
    "k8s.io/kubernetes/pkg/controller/disruption"
    "k8s.io/kubernetes/test/integration/framework"
    "reflect"
)

const (
@ -165,6 +166,82 @@ func TestConcurrentEvictionRequests(t *testing.T) {
    }
}

// TestTerminalPodEviction ensures that PDB is not checked for terminal pods.
func TestTerminalPodEviction(t *testing.T) {
    s, closeFn, rm, informers, clientSet := rmSetup(t)
    defer closeFn()

    ns := framework.CreateTestingNamespace("terminalpod-eviction", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

    stopCh := make(chan struct{})
    informers.Start(stopCh)
    go rm.Run(stopCh)
    defer close(stopCh)

    config := restclient.Config{Host: s.URL}
    clientSet, err := clientset.NewForConfig(&config)
    if err != nil {
        t.Fatalf("Failed to create clientset: %v", err)
    }

    var gracePeriodSeconds int64 = 30
    deleteOption := &metav1.DeleteOptions{
        GracePeriodSeconds: &gracePeriodSeconds,
    }
    pod := newPod("test-terminal-pod1")
    if _, err := clientSet.CoreV1().Pods(ns.Name).Create(pod); err != nil {
        t.Errorf("Failed to create pod: %v", err)
    }
    addPodConditionSucceeded(pod)
    if _, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(pod); err != nil {
        t.Fatal(err)
    }

    waitToObservePods(t, informers.Core().V1().Pods().Informer(), 1)

    pdb := newPDB()
    if _, err := clientSet.Policy().PodDisruptionBudgets(ns.Name).Create(pdb); err != nil {
        t.Errorf("Failed to create PodDisruptionBudget: %v", err)
    }

    pdbList, err := clientSet.Policy().PodDisruptionBudgets(ns.Name).List(metav1.ListOptions{})
    if err != nil {
        t.Fatalf("Error while listing pod disruption budget")
    }
    oldPdb := pdbList.Items[0]
    eviction := newEviction(ns.Name, pod.Name, deleteOption)
    err = wait.PollImmediate(5*time.Second, 60*time.Second, func() (bool, error) {
        e := clientSet.Policy().Evictions(ns.Name).Evict(eviction)
        switch {
        case errors.IsTooManyRequests(e):
            return false, nil
        case errors.IsConflict(e):
            return false, fmt.Errorf("Unexpected Conflict (409) error caused by failing to handle concurrent PDB updates: %v", e)
        case e == nil:
            return true, nil
        default:
            return false, e
        }
    })
    if err != nil {
        t.Fatalf("Eviction of pod failed %v", err)
    }
    pdbList, err = clientSet.Policy().PodDisruptionBudgets(ns.Name).List(metav1.ListOptions{})
    if err != nil {
        t.Fatalf("Error while listing pod disruption budget")
    }
    newPdb := pdbList.Items[0]
    // We shouldn't see an update in pod disruption budget status' generation number as we are evicting terminal pods without checking for pod disruption.
    if !reflect.DeepEqual(newPdb.Status.ObservedGeneration, oldPdb.Status.ObservedGeneration) {
        t.Fatalf("Expected the pdb generation to be of same value %v but got %v", newPdb.Status.ObservedGeneration, oldPdb.Status.ObservedGeneration)
    }

    if err := clientSet.Policy().PodDisruptionBudgets(ns.Name).Delete(pdb.Name, deleteOption); err != nil {
        t.Fatalf("Failed to delete pod disruption budget")
    }
}

func newPod(podName string) *v1.Pod {
    return &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
@ -182,6 +259,18 @@ func newPod(podName string) *v1.Pod {
    }
}

func addPodConditionSucceeded(pod *v1.Pod) {
    pod.Status = v1.PodStatus{
        Phase: v1.PodSucceeded,
        Conditions: []v1.PodCondition{
            {
                Type:   v1.PodReady,
                Status: v1.ConditionTrue,
            },
        },
    }
}

func addPodConditionReady(pod *v1.Pod) {
    pod.Status = v1.PodStatus{
        Phase: v1.PodRunning,
41
vendor/k8s.io/kubernetes/test/integration/examples/BUILD
generated
vendored
@ -20,28 +20,31 @@ go_test(
        "//cmd/kube-apiserver/app/options:go_default_library",
        "//pkg/master:go_default_library",
        "//pkg/master/reconcilers:go_default_library",
        "//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library",
        "//staging/src/k8s.io/client-go/discovery:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
        "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
        "//staging/src/k8s.io/client-go/util/cert:go_default_library",
        "//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library",
        "//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
        "//staging/src/k8s.io/kube-aggregator/pkg/cmd/server:go_default_library",
        "//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
        "//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1beta1:go_default_library",
        "//staging/src/k8s.io/sample-apiserver/pkg/cmd/server:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/pborman/uuid:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
        "//vendor/k8s.io/client-go/util/cert:go_default_library",
        "//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library",
        "//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
        "//vendor/k8s.io/kube-aggregator/pkg/cmd/server:go_default_library",
        "//vendor/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
        "//vendor/k8s.io/sample-apiserver/pkg/apis/wardle/v1beta1:go_default_library",
        "//vendor/k8s.io/sample-apiserver/pkg/cmd/server:go_default_library",
    ],
)

50
vendor/k8s.io/kubernetes/test/integration/examples/apiserver_test.go
generated
vendored
@ -31,10 +31,13 @@ import (

    "github.com/stretchr/testify/assert"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/util/wait"
    genericapiserver "k8s.io/apiserver/pkg/server"
    genericapiserveroptions "k8s.io/apiserver/pkg/server/options"
    discovery "k8s.io/client-go/discovery"
    client "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
@ -113,7 +116,7 @@ func TestAggregatedAPIServer(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }
    kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, _, admissionPostStartHook, err := app.CreateKubeAPIServerConfig(completedOptions, tunneler, proxyTransport)
    kubeAPIServerConfig, _, _, _, admissionPostStartHook, err := app.CreateKubeAPIServerConfig(completedOptions, tunneler, proxyTransport)
    if err != nil {
        t.Fatal(err)
    }
@ -124,7 +127,7 @@ func TestAggregatedAPIServer(t *testing.T) {
    kubeAPIServerClientConfig.ServerName = ""
    kubeClientConfigValue.Store(kubeAPIServerClientConfig)

    kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.NewEmptyDelegate(), sharedInformers, versionedInformers, admissionPostStartHook)
    kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.NewEmptyDelegate(), admissionPostStartHook)
    if err != nil {
        t.Fatal(err)
    }
@ -331,10 +334,13 @@ func TestAggregatedAPIServer(t *testing.T) {
        t.Fatal(err)
    }

    // this is ugly, but sleep just a little bit so that the watch is probably observed. Since nothing will actually be added to discovery
    // (the service is missing), we don't have an external signal.
    time.Sleep(100 * time.Millisecond)
    if _, err := aggregatorDiscoveryClient.Discovery().ServerResources(); err != nil {
    // wait for the unavailable API service to be processed with updated status
    err = wait.Poll(100*time.Millisecond, 5*time.Second, func() (done bool, err error) {
        _, err = aggregatorDiscoveryClient.Discovery().ServerResources()
        hasExpectedError := checkWardleUnavailableDiscoveryError(t, err)
        return hasExpectedError, nil
    })
    if err != nil {
        t.Fatal(err)
    }

@ -357,13 +363,41 @@ func TestAggregatedAPIServer(t *testing.T) {
    // (the service is missing), we don't have an external signal.
    time.Sleep(100 * time.Millisecond)
    _, err = aggregatorDiscoveryClient.Discovery().ServerResources()
    if err != nil {
        t.Fatal(err)
    hasExpectedError := checkWardleUnavailableDiscoveryError(t, err)
    if !hasExpectedError {
        t.Fatalf("Discovery call didn't return expected error: %v", err)
    }

    // TODO figure out how to turn on enough of services and dns to run more
}

func checkWardleUnavailableDiscoveryError(t *testing.T, err error) bool {
    if err == nil {
        t.Log("Discovery call expected to return failed unavailable service")
        return false
    }
    if !discovery.IsGroupDiscoveryFailedError(err) {
        t.Logf("Unexpected error: %T, %v", err, err)
        return false
    }
    discoveryErr := err.(*discovery.ErrGroupDiscoveryFailed)
    if len(discoveryErr.Groups) != 1 {
        t.Logf("Unexpected failed groups: %v", err)
        return false
    }
    groupVersion := schema.GroupVersion{Group: "wardle.k8s.io", Version: "v1alpha1"}
    groupVersionErr, ok := discoveryErr.Groups[groupVersion]
    if !ok {
        t.Logf("Unexpected failed group version: %v", err)
        return false
    }
    if !apierrors.IsServiceUnavailable(groupVersionErr) {
        t.Logf("Unexpected failed group version error: %v", err)
        return false
    }
    return true
}

func createKubeConfig(clientCfg *rest.Config) *clientcmdapi.Config {
    clusterNick := "cluster"
    userNick := "user"
4
vendor/k8s.io/kubernetes/test/integration/examples/setup_test.go
generated
vendored
@ -111,7 +111,7 @@ func startTestServer(t *testing.T, stopCh <-chan struct{}, setup TestServerSetup
    if err != nil {
        t.Fatal(err)
    }
    kubeAPIServerConfig, sharedInformers, versionedInformers, _, _, _, admissionPostStartHook, err := app.CreateKubeAPIServerConfig(completedOptions, tunneler, proxyTransport)
    kubeAPIServerConfig, _, _, _, admissionPostStartHook, err := app.CreateKubeAPIServerConfig(completedOptions, tunneler, proxyTransport)
    if err != nil {
        t.Fatal(err)
    }
@ -119,7 +119,7 @@ func startTestServer(t *testing.T, stopCh <-chan struct{}, setup TestServerSetup
    if setup.ModifyServerConfig != nil {
        setup.ModifyServerConfig(kubeAPIServerConfig)
    }
    kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.NewEmptyDelegate(), sharedInformers, versionedInformers, admissionPostStartHook)
    kubeAPIServer, err := app.CreateKubeAPIServer(kubeAPIServerConfig, genericapiserver.NewEmptyDelegate(), admissionPostStartHook)
    if err != nil {
        t.Fatal(err)
    }
9
vendor/k8s.io/kubernetes/test/integration/examples/webhook_test.go
generated
vendored
@ -108,11 +108,18 @@ func (f auditChecker) LevelAndStages(attrs authorizer.Attributes) (auditinternal

type auditSinkFunc func(events ...*auditinternal.Event)

func (f auditSinkFunc) ProcessEvents(events ...*auditinternal.Event) {
func (f auditSinkFunc) ProcessEvents(events ...*auditinternal.Event) bool {
    f(events...)
    return true
}

func (auditSinkFunc) Run(stopCh <-chan struct{}) error {
    return nil
}

func (auditSinkFunc) Shutdown() {
}

func (auditSinkFunc) String() string {
    return ""
}

59
vendor/k8s.io/kubernetes/test/integration/framework/BUILD
generated
vendored
@ -28,39 +28,40 @@ go_library(
        "//pkg/master:go_default_library",
        "//pkg/util/env:go_default_library",
        "//pkg/version:go_default_library",
        "//staging/src/k8s.io/api/apps/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/auditregistration/v1alpha1:go_default_library",
        "//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
        "//staging/src/k8s.io/api/certificates/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/rbac/v1alpha1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/authenticatorfactory:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/request/union:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authorization/union:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/endpoints/openapi:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/server/storage:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/utils:go_default_library",
        "//vendor/github.com/go-openapi/spec:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/pborman/uuid:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/api/autoscaling/v1:go_default_library",
        "//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/api/rbac/v1alpha1:go_default_library",
        "//vendor/k8s.io/api/storage/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/request/union:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authorization/union:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/endpoints/openapi:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server/storage:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

169
vendor/k8s.io/kubernetes/test/integration/framework/etcd.go
generated
vendored
@ -17,93 +17,126 @@ limitations under the License.
package framework

import (
"context"
"fmt"
"hash/adler32"
"io"
"io/ioutil"
"math/rand"
"net"
"os"
"os/exec"
"path/filepath"
"sync"
"strings"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/kubernetes/pkg/util/env"
)

var (
etcdSetup sync.Once
etcdURL = ""
)
var etcdURL = ""

func setupETCD() {
etcdSetup.Do(func() {
if os.Getenv("RUNFILES_DIR") == "" {
etcdURL = env.GetEnvAsStringOrFallback("KUBE_INTEGRATION_ETCD_URL", "http://127.0.0.1:2379")
return
}
etcdPath := filepath.Join(os.Getenv("RUNFILES_DIR"), "com_coreos_etcd/etcd")
// give every test the same random port each run
etcdPort := 20000 + rand.New(rand.NewSource(int64(adler32.Checksum([]byte(os.Args[0]))))).Intn(5000)
etcdURL = fmt.Sprintf("http://127.0.0.1:%d", etcdPort)
info, err := os.Stat(etcdPath)
if err != nil {
glog.Fatalf("Unable to stat etcd: %v", err)
}
if info.IsDir() {
glog.Fatalf("Did not expect %q to be a directory", etcdPath)
}
etcdDataDir, err := ioutil.TempDir(os.TempDir(), "integration_test_etcd_data")
if err != nil {
glog.Fatalf("Unable to make temp etcd data dir: %v", err)
}
glog.Infof("storing etcd data in: %v", etcdDataDir)

etcdCmd := exec.Command(
etcdPath,
"--data-dir",
etcdDataDir,
"--listen-client-urls",
GetEtcdURL(),
"--advertise-client-urls",
GetEtcdURL(),
"--listen-peer-urls",
"http://127.0.0.1:0",
)

stdout, err := etcdCmd.StdoutPipe()
if err != nil {
glog.Fatalf("Failed to run etcd: %v", err)
}
stderr, err := etcdCmd.StderrPipe()
if err != nil {
glog.Fatalf("Failed to run etcd: %v", err)
}
if err := etcdCmd.Start(); err != nil {
glog.Fatalf("Failed to run etcd: %v", err)
}

go io.Copy(os.Stdout, stdout)
go io.Copy(os.Stderr, stderr)

go func() {
if err := etcdCmd.Wait(); err != nil {
glog.Fatalf("Failed to run etcd: %v", err)
}
glog.Fatalf("etcd should not have succeeded")
}()
})

const installEtcd = `
Cannot find etcd, cannot run integration tests
Please see https://github.com/kubernetes/community/blob/master/contributors/devel/testing.md#install-etcd-dependency for instructions.

You can use 'hack/install-etcd.sh' to install a copy in third_party/.
`

// getEtcdPath returns a path to an etcd executable.
func getEtcdPath() (string, error) {
bazelPath := filepath.Join(os.Getenv("RUNFILES_DIR"), "com_coreos_etcd/etcd")
p, err := exec.LookPath(bazelPath)
if err == nil {
return p, nil
}
return exec.LookPath("etcd")
}

// getAvailablePort returns a TCP port that is available for binding.
func getAvailablePort() (int, error) {
l, err := net.Listen("tcp", ":0")
if err != nil {
return 0, fmt.Errorf("could not bind to a port: %v", err)
}
// It is possible but unlikely that someone else will bind this port before we
// get a chance to use it.
defer l.Close()
return l.Addr().(*net.TCPAddr).Port, nil
}

// startEtcd executes an etcd instance. The returned function will signal the
// etcd process and wait for it to exit.
func startEtcd() (func(), error) {
etcdURL = env.GetEnvAsStringOrFallback("KUBE_INTEGRATION_ETCD_URL", "http://127.0.0.1:2379")
conn, err := net.Dial("tcp", strings.TrimPrefix(etcdURL, "http://"))
if err == nil {
klog.Infof("etcd already running at %s", etcdURL)
conn.Close()
return func() {}, nil
}
klog.V(1).Infof("could not connect to etcd: %v", err)

// TODO: Check for valid etcd version.
etcdPath, err := getEtcdPath()
if err != nil {
fmt.Fprintf(os.Stderr, installEtcd)
return nil, fmt.Errorf("could not find etcd in PATH: %v", err)
}
etcdPort, err := getAvailablePort()
if err != nil {
return nil, fmt.Errorf("could not get a port: %v", err)
}
etcdURL = fmt.Sprintf("http://127.0.0.1:%d", etcdPort)
klog.Infof("starting etcd on %s", etcdURL)

etcdDataDir, err := ioutil.TempDir(os.TempDir(), "integration_test_etcd_data")
if err != nil {
return nil, fmt.Errorf("unable to make temp etcd data dir: %v", err)
}
klog.Infof("storing etcd data in: %v", etcdDataDir)

ctx, cancel := context.WithCancel(context.Background())
cmd := exec.CommandContext(
ctx,
etcdPath,
"--data-dir",
etcdDataDir,
"--listen-client-urls",
GetEtcdURL(),
"--advertise-client-urls",
GetEtcdURL(),
"--listen-peer-urls",
"http://127.0.0.1:0",
)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
stop := func() {
cancel()
err := cmd.Wait()
klog.Infof("etcd exit status: %v", err)
err = os.RemoveAll(etcdDataDir)
if err != nil {
klog.Warningf("error during etcd cleanup: %v", err)
}
}

if err := cmd.Start(); err != nil {
return nil, fmt.Errorf("failed to run etcd: %v", err)
}
return stop, nil
}

// EtcdMain starts an etcd instance before running tests.
func EtcdMain(tests func() int) {
setupETCD()
os.Exit(tests())
stop, err := startEtcd()
if err != nil {
klog.Fatalf("cannot run integration tests: unable to start etcd: %v", err)
}
result := tests()
stop() // Don't defer this. See os.Exit documentation.
os.Exit(result)
}

// return the EtcdURL
// GetEtcdURL returns the URL of the etcd instance started by EtcdMain.
func GetEtcdURL() string {
return etcdURL
}
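// Usage note (not part of the vendored diff): EtcdMain now wraps a package's
// test binary so every test shares the single etcd started by startEtcd, and
// GetEtcdURL exposes its client URL. The ipamperf main_test.go diff further
// down shows the canonical call site; a minimal sketch:
//
// func TestMain(m *testing.M) {
//     framework.EtcdMain(m.Run)
// }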
54
vendor/k8s.io/kubernetes/test/integration/framework/master_utils.go
generated
vendored
@ -24,10 +24,11 @@ import (
"time"

"github.com/go-openapi/spec"
"github.com/golang/glog"
"github.com/pborman/uuid"
"k8s.io/klog"

apps "k8s.io/api/apps/v1beta1"
auditreg "k8s.io/api/auditregistration/v1alpha1"
autoscaling "k8s.io/api/autoscaling/v1"
certificates "k8s.io/api/certificates/v1beta1"
"k8s.io/api/core/v1"
@ -82,9 +83,11 @@ func (alwaysAllow) Authorize(requestAttributes authorizer.Attributes) (authorize
}

// alwaysEmpty simulates "no authentication" for old tests
func alwaysEmpty(req *http.Request) (user.Info, bool, error) {
return &user.DefaultInfo{
Name: "",
func alwaysEmpty(req *http.Request) (*authauthenticator.Response, bool, error) {
return &authauthenticator.Response{
User: &user.DefaultInfo{
Name: "",
},
}, true, nil
}

@ -119,6 +122,7 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv

stopCh := make(chan struct{})
closeFn := func() {
m.GenericAPIServer.RunPreShutdownHooks()
close(stopCh)
s.Close()
}
@ -174,14 +178,14 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv

clientset, err := clientset.NewForConfig(masterConfig.GenericConfig.LoopbackClientConfig)
if err != nil {
glog.Fatal(err)
klog.Fatal(err)
}

sharedInformers := informers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout)
m, err = masterConfig.Complete(sharedInformers).New(genericapiserver.NewEmptyDelegate())
masterConfig.ExtraConfig.VersionedInformers = informers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout)
m, err = masterConfig.Complete().New(genericapiserver.NewEmptyDelegate())
if err != nil {
closeFn()
glog.Fatalf("error in bringing up the master: %v", err)
klog.Fatalf("error in bringing up the master: %v", err)
}
if masterReceiver != nil {
masterReceiver.SetMaster(m)
@ -198,7 +202,7 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv
privilegedClient, err := restclient.RESTClientFor(&cfg)
if err != nil {
closeFn()
glog.Fatal(err)
klog.Fatal(err)
}
var lastHealthContent []byte
err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
@ -213,8 +217,8 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv
})
if err != nil {
closeFn()
glog.Errorf("last health content: %q", string(lastHealthContent))
glog.Fatal(err)
klog.Errorf("last health content: %q", string(lastHealthContent))
klog.Fatal(err)
}

return m, s, closeFn
@ -225,6 +229,10 @@ func NewIntegrationTestMasterConfig() *master.Config {
masterConfig := NewMasterConfig()
masterConfig.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4")
masterConfig.ExtraConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource()

// TODO: get rid of these tests or port them to secure serving
masterConfig.GenericConfig.SecureServing = &genericapiserver.SecureServingInfo{Listener: fakeLocalhost443Listener{}}

return masterConfig
}

@ -285,12 +293,19 @@ func NewMasterConfig() *master.Config {
schema.GroupResource{Group: storage.GroupName, Resource: serverstorage.AllResources},
"",
ns)
storageFactory.SetSerializer(
schema.GroupResource{Group: auditreg.GroupName, Resource: serverstorage.AllResources},
"",
ns)

genericConfig := genericapiserver.NewConfig(legacyscheme.Codecs)
kubeVersion := version.Get()
genericConfig.Version = &kubeVersion
genericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer()

// TODO: get rid of these tests or port them to secure serving
genericConfig.SecureServing = &genericapiserver.SecureServingInfo{Listener: fakeLocalhost443Listener{}}

err := etcdOptions.ApplyWithStorageFactoryTo(storageFactory, genericConfig)
if err != nil {
panic(err)
@ -329,3 +344,20 @@ func SharedEtcd() *storagebackend.Config {
cfg.ServerList = []string{GetEtcdURL()}
return cfg
}

type fakeLocalhost443Listener struct{}

func (fakeLocalhost443Listener) Accept() (net.Conn, error) {
return nil, nil
}

func (fakeLocalhost443Listener) Close() error {
return nil
}

func (fakeLocalhost443Listener) Addr() net.Addr {
return &net.TCPAddr{
IP: net.IPv4(127, 0, 0, 1),
Port: 443,
}
}
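// Note (not part of the vendored diff): the alwaysEmpty hunk above tracks the
// authenticator change where request authenticators return a
// *authenticator.Response instead of a bare user.Info. A hedged sketch of the
// same pattern for a custom authenticator; fixedUser is a hypothetical helper,
// and the sketch assumes k8s.io/apiserver/pkg/authentication/authenticator
// and k8s.io/apiserver/pkg/authentication/user:
//
// func fixedUser(name string) func(req *http.Request) (*authenticator.Response, bool, error) {
//     return func(req *http.Request) (*authenticator.Response, bool, error) {
//         // true means the request was authenticated as this user
//         return &authenticator.Response{User: &user.DefaultInfo{Name: name}}, true, nil
//     }
// }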
10
vendor/k8s.io/kubernetes/test/integration/framework/perf_utils.go
generated
vendored
@ -24,7 +24,7 @@ import (
e2eframework "k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"

"github.com/golang/glog"
"k8s.io/klog"
)

const (
@ -51,7 +51,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
numNodes += v.Count
}

glog.Infof("Making %d nodes", numNodes)
klog.Infof("Making %d nodes", numNodes)
baseNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
GenerateName: p.nodeNamePrefix,
@ -77,7 +77,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
}
}
if err != nil {
glog.Fatalf("Error creating node: %v", err)
klog.Fatalf("Error creating node: %v", err)
}
}

@ -88,7 +88,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
sum += v.Count
for ; index < sum; index++ {
if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
glog.Errorf("Aborting node preparation: %v", err)
klog.Errorf("Aborting node preparation: %v", err)
return err
}
}
@ -100,7 +100,7 @@ func (p *IntegrationTestNodePreparer) CleanupNodes() error {
nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
for i := range nodes.Items {
if err := p.client.CoreV1().Nodes().Delete(nodes.Items[i].Name, &metav1.DeleteOptions{}); err != nil {
glog.Errorf("Error while deleting Node: %v", err)
klog.Errorf("Error while deleting Node: %v", err)
}
}
return nil
4
vendor/k8s.io/kubernetes/test/integration/framework/serializer.go
generated
vendored
@ -45,9 +45,9 @@ func (s *wrappedSerializer) UniversalDeserializer() runtime.Decoder {
}

func (s *wrappedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder {
return versioning.NewCodec(encoder, nil, s.scheme, s.scheme, s.scheme, s.scheme, gv, nil)
return versioning.NewCodec(encoder, nil, s.scheme, s.scheme, s.scheme, s.scheme, gv, nil, s.scheme.Name())
}

func (s *wrappedSerializer) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder {
return versioning.NewCodec(nil, decoder, s.scheme, s.scheme, s.scheme, s.scheme, nil, gv)
return versioning.NewCodec(nil, decoder, s.scheme, s.scheme, s.scheme, s.scheme, nil, gv, s.scheme.Name())
}
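// Note (not part of the vendored diff): both hunks make the same mechanical
// change: versioning.NewCodec gained a trailing scheme-name string, used to
// identify the originating scheme in codec diagnostics. A hedged sketch of
// the call shape, assuming scheme is a *runtime.Scheme standing in for the
// convertor, creater, typer, and defaulter arguments as it does above:
//
// codec := versioning.NewCodec(
//     encoder, decoder,
//     scheme, scheme, scheme, scheme, // convertor, creater, typer, defaulter
//     encodeVersion, decodeVersion,
//     scheme.Name(), // new final argument
// )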
38
vendor/k8s.io/kubernetes/test/integration/garbagecollector/BUILD
generated
vendored
@ -12,27 +12,27 @@ go_test(
deps = [
"//cmd/kube-apiserver/app/testing:go_default_library",
"//pkg/controller/garbagecollector:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//staging/src/k8s.io/client-go/discovery/cached:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//test/integration:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//vendor/k8s.io/client-go/discovery/cached:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/restmapper:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)

12
vendor/k8s.io/kubernetes/test/integration/garbagecollector/garbage_collector_test.go
generated
vendored
@ -27,7 +27,7 @@ import (
"k8s.io/api/core/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/testserver"
apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/fixtures"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -812,7 +812,7 @@ func TestCustomResourceCascadingDeletion(t *testing.T) {

// Create a custom owner resource.
owner := newCRDInstance(definition, ns.Name, names.SimpleNameGenerator.GenerateName("owner"))
owner, err := resourceClient.Create(owner)
owner, err := resourceClient.Create(owner, metav1.CreateOptions{})
if err != nil {
t.Fatalf("failed to create owner resource %q: %v", owner.GetName(), err)
}
@ -822,7 +822,7 @@ func TestCustomResourceCascadingDeletion(t *testing.T) {
dependent := newCRDInstance(definition, ns.Name, names.SimpleNameGenerator.GenerateName("dependent"))
link(t, owner, dependent)

dependent, err = resourceClient.Create(dependent)
dependent, err = resourceClient.Create(dependent, metav1.CreateOptions{})
if err != nil {
t.Fatalf("failed to create dependent resource %q: %v", dependent.GetName(), err)
}
@ -873,7 +873,7 @@ func TestMixedRelationships(t *testing.T) {
definition, resourceClient := createRandomCustomResourceDefinition(t, apiExtensionClient, dynamicClient, ns.Name)

// Create a custom owner resource.
customOwner, err := resourceClient.Create(newCRDInstance(definition, ns.Name, names.SimpleNameGenerator.GenerateName("owner")))
customOwner, err := resourceClient.Create(newCRDInstance(definition, ns.Name, names.SimpleNameGenerator.GenerateName("owner")), metav1.CreateOptions{})
if err != nil {
t.Fatalf("failed to create owner: %v", err)
}
@ -900,7 +900,7 @@ func TestMixedRelationships(t *testing.T) {
coreOwner.TypeMeta.Kind = "ConfigMap"
coreOwner.TypeMeta.APIVersion = "v1"
link(t, coreOwner, customDependent)
customDependent, err = resourceClient.Create(customDependent)
customDependent, err = resourceClient.Create(customDependent, metav1.CreateOptions{})
if err != nil {
t.Fatalf("failed to create dependent: %v", err)
}
@ -971,7 +971,7 @@ func TestCRDDeletionCascading(t *testing.T) {
definition, resourceClient := createRandomCustomResourceDefinition(t, apiExtensionClient, dynamicClient, ns.Name)

// Create a custom owner resource.
owner, err := resourceClient.Create(newCRDInstance(definition, ns.Name, names.SimpleNameGenerator.GenerateName("owner")))
owner, err := resourceClient.Create(newCRDInstance(definition, ns.Name, names.SimpleNameGenerator.GenerateName("owner")), metav1.CreateOptions{})
if err != nil {
t.Fatalf("failed to create owner: %v", err)
}
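// Note (not part of the vendored diff): every hunk in this file follows one
// mechanical client-go change: the dynamic client's Create now takes an
// options argument. A hedged sketch of the updated call shape, assuming
// resourceClient is a dynamic ResourceInterface as in the tests above:
//
// obj, err := resourceClient.Create(obj, metav1.CreateOptions{})
// if err != nil {
//     t.Fatalf("create failed: %v", err)
// }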
32
vendor/k8s.io/kubernetes/test/integration/ipamperf/BUILD
generated
vendored
@ -14,13 +14,13 @@ go_test(
deps = [
"//pkg/controller/nodeipam:go_default_library",
"//pkg/controller/nodeipam/ipam:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//test/integration/framework:go_default_library",
"//test/integration/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@ -47,24 +47,24 @@ go_library(
],
importpath = "k8s.io/kubernetes/test/integration/ipamperf",
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/gce/cloud:go_default_library",
"//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library",
"//pkg/controller/nodeipam/ipam:go_default_library",
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
"//pkg/controller/util/node:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//test/integration/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
14
vendor/k8s.io/kubernetes/test/integration/ipamperf/ipam_test.go
generated
vendored
@ -25,7 +25,7 @@ import (
"testing"
"time"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/informers"
@ -65,7 +65,7 @@ func setupAllocator(apiURL string, config *Config, clusterCIDR, serviceCIDR *net

func runTest(t *testing.T, apiURL string, config *Config, clusterCIDR, serviceCIDR *net.IPNet, subnetMaskSize int) (*Results, error) {
t.Helper()
glog.Infof("Running test %s", t.Name())
klog.Infof("Running test %s", t.Name())

defer deleteNodes(apiURL, config) // cleanup nodes on after controller shutdown

@ -85,7 +85,7 @@ func runTest(t *testing.T, apiURL string, config *Config, clusterCIDR, serviceCI
}

results := o.Results(t.Name(), config)
glog.Infof("Results: %s", results)
klog.Infof("Results: %s", results)
if !results.Succeeded {
t.Errorf("%s: Not allocations succeeded", t.Name())
}
@ -95,16 +95,16 @@ func runTest(t *testing.T, apiURL string, config *Config, clusterCIDR, serviceCI
func logResults(allResults []*Results) {
jStr, err := json.MarshalIndent(allResults, "", "  ")
if err != nil {
glog.Errorf("Error formating results: %v", err)
klog.Errorf("Error formatting results: %v", err)
return
}
if resultsLogFile != "" {
glog.Infof("Logging results to %s", resultsLogFile)
klog.Infof("Logging results to %s", resultsLogFile)
if err := ioutil.WriteFile(resultsLogFile, jStr, os.FileMode(0644)); err != nil {
glog.Errorf("Error logging results to %s: %v", resultsLogFile, err)
klog.Errorf("Error logging results to %s: %v", resultsLogFile, err)
}
}
glog.Infof("AllResults:\n%s", string(jStr))
klog.Infof("AllResults:\n%s", string(jStr))
}

func TestPerformance(t *testing.T) {
4
vendor/k8s.io/kubernetes/test/integration/ipamperf/main_test.go
generated
vendored
@ -20,7 +20,7 @@ import (
"flag"
"testing"

"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
"k8s.io/kubernetes/test/integration/framework"
)
@ -59,7 +59,7 @@ func TestMain(m *testing.M) {
case string(ipam.IPAMFromClusterAllocatorType):
customConfig.AllocatorType = ipam.IPAMFromClusterAllocatorType
default:
glog.Fatalf("Unknown allocator type: %s", allocator)
klog.Fatalf("Unknown allocator type: %s", allocator)
}

framework.EtcdMain(m.Run)
10
vendor/k8s.io/kubernetes/test/integration/ipamperf/results.go
generated
vendored
@ -23,12 +23,12 @@ import (
"sync"
"time"

"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
)
@ -96,7 +96,7 @@ func NewObserver(clientSet *clientset.Clientset, numNodes int) *Observer {
// Call Results() to get the test results after starting observer.
func (o *Observer) StartObserving() error {
o.monitor()
glog.Infof("Test observer started")
klog.Infof("Test observer started")
return nil
}

@ -174,12 +174,12 @@ func (o *Observer) monitor() {
nTime.podCIDR = newNode.Spec.PodCIDR
o.numAllocated++
if o.numAllocated%10 == 0 {
glog.Infof("progress: %d/%d - %.2d%%", o.numAllocated, o.numNodes, (o.numAllocated * 100.0 / o.numNodes))
klog.Infof("progress: %d/%d - %.2d%%", o.numAllocated, o.numNodes, (o.numAllocated * 100.0 / o.numNodes))
}
// do following check only if numAllocated is modified, as otherwise, redundant updates
// can cause wg.Done() to be called multiple times, causing a panic
if o.numAdded == o.numNodes && o.numAllocated == o.numNodes {
glog.Info("All nodes assigned podCIDR")
klog.Info("All nodes assigned podCIDR")
o.wg.Done()
}
}
14
vendor/k8s.io/kubernetes/test/integration/ipamperf/util.go
generated
vendored
@ -19,7 +19,6 @@ package ipamperf
import (
"time"

"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
@ -27,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/klog"
)

const (
@ -54,7 +54,7 @@ var (
)

func deleteNodes(apiURL string, config *Config) {
glog.Info("Deleting nodes")
klog.Info("Deleting nodes")
clientSet := clientset.NewForConfigOrDie(&restclient.Config{
Host: apiURL,
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}},
@ -63,7 +63,7 @@ func deleteNodes(apiURL string, config *Config) {
})
noGrace := int64(0)
if err := clientSet.CoreV1().Nodes().DeleteCollection(&metav1.DeleteOptions{GracePeriodSeconds: &noGrace}, metav1.ListOptions{}); err != nil {
glog.Errorf("Error deleting node: %v", err)
klog.Errorf("Error deleting node: %v", err)
}
}

@ -74,22 +74,22 @@ func createNodes(apiURL string, config *Config) error {
QPS: float32(config.CreateQPS),
Burst: config.CreateQPS,
})
glog.Infof("Creating %d nodes", config.NumNodes)
klog.Infof("Creating %d nodes", config.NumNodes)
for i := 0; i < config.NumNodes; i++ {
var err error
for j := 0; j < maxCreateRetries; j++ {
if _, err = clientSet.CoreV1().Nodes().Create(baseNodeTemplate); err != nil && errors.IsServerTimeout(err) {
glog.Infof("Server timeout creating nodes, retrying after %v", retryDelay)
klog.Infof("Server timeout creating nodes, retrying after %v", retryDelay)
time.Sleep(retryDelay)
continue
}
break
}
if err != nil {
glog.Errorf("Error creating nodes: %v", err)
klog.Errorf("Error creating nodes: %v", err)
return err
}
}
glog.Infof("%d nodes created", config.NumNodes)
klog.Infof("%d nodes created", config.NumNodes)
return nil
}
226
vendor/k8s.io/kubernetes/test/integration/master/BUILD
generated
vendored
@ -10,44 +10,14 @@ go_test(
name = "go_default_test",
size = "large",
srcs = [
"audit_test.go",
"crd_test.go",
"kms_transformation_test.go",
"kube_apiserver_test.go",
"main_test.go",
"secrets_transformation_test.go",
"synthetic_master_test.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"kms_transformation_test.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"kms_transformation_test.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"kms_transformation_test.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"kms_transformation_test.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"kms_transformation_test.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"kms_transformation_test.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"kms_transformation_test.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"kms_transformation_test.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"kms_transformation_test.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"kms_transformation_test.go",
],
"//conditions:default": [],
}),
],
embed = [":go_default_library"],
tags = ["integration"],
deps = [
@ -56,70 +26,77 @@ go_test(
"//pkg/apis/core:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/master:go_default_library",
"//staging/src/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/apps/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/networking/v1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/audit/v1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/config/v1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/group:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/request/bearertoken:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/features:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/aes:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/tokentest:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library",
"//test/integration:go_default_library",
"//test/integration/etcd:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/networking/v1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/group:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library",
"//vendor/k8s.io/apiserver/pkg/features:go_default_library",
"//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/tokentest:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/evanphx/json-patch:go_default_library",
"//vendor/sigs.k8s.io/yaml:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:darwin": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:linux": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:nacl": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:plan9": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"@io_bazel_rules_go//go/platform:solaris": [
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
],
"//conditions:default": [],
}),
@ -141,114 +118,73 @@ filegroup(
go_library(
name = "go_default_library",
srcs = [
"kms_plugin_mock.go",
"transformation_testcase.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"kms_plugin_mock.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"kms_plugin_mock.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"kms_plugin_mock.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"kms_plugin_mock.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"kms_plugin_mock.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"kms_plugin_mock.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"kms_plugin_mock.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"kms_plugin_mock.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"kms_plugin_mock.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"kms_plugin_mock.go",
],
"//conditions:default": [],
}),
],
importpath = "k8s.io/kubernetes/test/integration/master",
deps = [
"//cmd/kube-apiserver/app/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/config/v1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/integration:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/sigs.k8s.io/yaml:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
"@io_bazel_rules_go//go/platform:darwin": [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
"@io_bazel_rules_go//go/platform:linux": [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
"@io_bazel_rules_go//go/platform:nacl": [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
"@io_bazel_rules_go//go/platform:plan9": [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
"@io_bazel_rules_go//go/platform:solaris": [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
"//conditions:default": [],
}),
260
vendor/k8s.io/kubernetes/test/integration/master/audit_test.go
generated
vendored
Normal file
@ -0,0 +1,260 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package master

import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
"testing"

apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
auditinternal "k8s.io/apiserver/pkg/apis/audit"
auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
auditv1beta1 "k8s.io/apiserver/pkg/apis/audit/v1beta1"
"k8s.io/client-go/kubernetes"
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
"k8s.io/kubernetes/test/integration/framework"
"k8s.io/kubernetes/test/utils"

"github.com/evanphx/json-patch"
)

var (
auditPolicyPattern = `
apiVersion: {version}
kind: Policy
rules:
- level: RequestResponse
  resources:
  - group: "" # core
    resources: ["configmaps"]

`
namespace = "default"
watchTestTimeout int64 = 1
watchOptions = metav1.ListOptions{TimeoutSeconds: &watchTestTimeout}
patch, _ = json.Marshal(jsonpatch.Patch{})
auditTestUser = "system:apiserver"
versions = map[string]schema.GroupVersion{
"audit.k8s.io/v1": auditv1.SchemeGroupVersion,
"audit.k8s.io/v1beta1": auditv1beta1.SchemeGroupVersion,
}
)

// TestAudit ensures that both v1beta1 and v1 version audit api could work.
func TestAudit(t *testing.T) {
for version := range versions {
testAudit(t, version)
}
}

func testAudit(t *testing.T, version string) {
// prepare audit policy file
auditPolicy := []byte(strings.Replace(auditPolicyPattern, "{version}", version, 1))
policyFile, err := ioutil.TempFile("", "audit-policy.yaml")
if err != nil {
t.Fatalf("Failed to create audit policy file: %v", err)
}
defer os.Remove(policyFile.Name())
if _, err := policyFile.Write(auditPolicy); err != nil {
t.Fatalf("Failed to write audit policy file: %v", err)
}
if err := policyFile.Close(); err != nil {
t.Fatalf("Failed to close audit policy file: %v", err)
}

// prepare audit log file
logFile, err := ioutil.TempFile("", "audit.log")
if err != nil {
t.Fatalf("Failed to create audit log file: %v", err)
}
defer os.Remove(logFile.Name())

// start api server
result := kubeapiservertesting.StartTestServerOrDie(t, nil,
[]string{
"--audit-policy-file", policyFile.Name(),
"--audit-log-version", version,
"--audit-log-mode", "blocking",
"--audit-log-path", logFile.Name()},
framework.SharedEtcd())
defer result.TearDownFn()

kubeclient, err := kubernetes.NewForConfig(result.ClientConfig)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}

func() {
// create, get, watch, update, patch, list and delete configmap.
configMap := &apiv1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "audit-configmap",
},
Data: map[string]string{
"map-key": "map-value",
},
}

_, err := kubeclient.CoreV1().ConfigMaps(namespace).Create(configMap)
expectNoError(t, err, "failed to create audit-configmap")

_, err = kubeclient.CoreV1().ConfigMaps(namespace).Get(configMap.Name, metav1.GetOptions{})
expectNoError(t, err, "failed to get audit-configmap")

configMapChan, err := kubeclient.CoreV1().ConfigMaps(namespace).Watch(watchOptions)
expectNoError(t, err, "failed to create watch for config maps")
configMapChan.Stop()

_, err = kubeclient.CoreV1().ConfigMaps(namespace).Update(configMap)
expectNoError(t, err, "failed to update audit-configmap")

_, err = kubeclient.CoreV1().ConfigMaps(namespace).Patch(configMap.Name, types.JSONPatchType, patch)
expectNoError(t, err, "failed to patch configmap")

_, err = kubeclient.CoreV1().ConfigMaps(namespace).List(metav1.ListOptions{})
expectNoError(t, err, "failed to list config maps")

err = kubeclient.CoreV1().ConfigMaps(namespace).Delete(configMap.Name, &metav1.DeleteOptions{})
expectNoError(t, err, "failed to delete audit-configmap")
}()

expectedEvents := []utils.AuditEvent{
{
Level: auditinternal.LevelRequestResponse,
Stage: auditinternal.StageResponseComplete,
RequestURI: fmt.Sprintf("/api/v1/namespaces/%s/configmaps", namespace),
Verb: "create",
Code: 201,
User: auditTestUser,
Resource: "configmaps",
Namespace: namespace,
RequestObject: true,
ResponseObject: true,
AuthorizeDecision: "allow",
}, {
Level: auditinternal.LevelRequestResponse,
Stage: auditinternal.StageResponseComplete,
RequestURI: fmt.Sprintf("/api/v1/namespaces/%s/configmaps/audit-configmap", namespace),
Verb: "get",
Code: 200,
User: auditTestUser,
Resource: "configmaps",
Namespace: namespace,
RequestObject: false,
ResponseObject: true,
AuthorizeDecision: "allow",
}, {
Level: auditinternal.LevelRequestResponse,
Stage: auditinternal.StageResponseComplete,
RequestURI: fmt.Sprintf("/api/v1/namespaces/%s/configmaps", namespace),
Verb: "list",
Code: 200,
User: auditTestUser,
Resource: "configmaps",
Namespace: namespace,
RequestObject: false,
ResponseObject: true,
AuthorizeDecision: "allow",
}, {
Level: auditinternal.LevelRequestResponse,
Stage: auditinternal.StageResponseStarted,
RequestURI: fmt.Sprintf("/api/v1/namespaces/%s/configmaps?timeout=%ds&timeoutSeconds=%d&watch=true", namespace, watchTestTimeout, watchTestTimeout),
Verb: "watch",
Code: 200,
User: auditTestUser,
Resource: "configmaps",
Namespace: namespace,
RequestObject: false,
ResponseObject: false,
AuthorizeDecision: "allow",
}, {
Level: auditinternal.LevelRequestResponse,
Stage: auditinternal.StageResponseComplete,
RequestURI: fmt.Sprintf("/api/v1/namespaces/%s/configmaps?timeout=%ds&timeoutSeconds=%d&watch=true", namespace, watchTestTimeout, watchTestTimeout),
Verb: "watch",
Code: 200,
User: auditTestUser,
Resource: "configmaps",
Namespace: namespace,
RequestObject: false,
ResponseObject: false,
|
||||
AuthorizeDecision: "allow",
|
||||
}, {
|
||||
Level: auditinternal.LevelRequestResponse,
|
||||
Stage: auditinternal.StageResponseComplete,
|
||||
RequestURI: fmt.Sprintf("/api/v1/namespaces/%s/configmaps/audit-configmap", namespace),
|
||||
Verb: "update",
|
||||
Code: 200,
|
||||
User: auditTestUser,
|
||||
Resource: "configmaps",
|
||||
Namespace: namespace,
|
||||
RequestObject: true,
|
||||
ResponseObject: true,
|
||||
AuthorizeDecision: "allow",
|
||||
}, {
|
||||
Level: auditinternal.LevelRequestResponse,
|
||||
Stage: auditinternal.StageResponseComplete,
|
||||
RequestURI: fmt.Sprintf("/api/v1/namespaces/%s/configmaps/audit-configmap", namespace),
|
||||
Verb: "patch",
|
||||
Code: 200,
|
||||
User: auditTestUser,
|
||||
Resource: "configmaps",
|
||||
Namespace: namespace,
|
||||
RequestObject: true,
|
||||
ResponseObject: true,
|
||||
AuthorizeDecision: "allow",
|
||||
}, {
|
||||
Level: auditinternal.LevelRequestResponse,
|
||||
Stage: auditinternal.StageResponseComplete,
|
||||
RequestURI: fmt.Sprintf("/api/v1/namespaces/%s/configmaps/audit-configmap", namespace),
|
||||
Verb: "delete",
|
||||
Code: 200,
|
||||
User: auditTestUser,
|
||||
Resource: "configmaps",
|
||||
Namespace: namespace,
|
||||
RequestObject: true,
|
||||
ResponseObject: true,
|
||||
AuthorizeDecision: "allow",
|
||||
},
|
||||
}
|
||||
|
||||
stream, err := os.Open(logFile.Name())
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
defer stream.Close()
|
||||
missing, err := utils.CheckAuditLines(stream, expectedEvents, versions[version])
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
if len(missing) > 0 {
|
||||
t.Errorf("Failed to match all expected events, events %#v not found!", missing)
|
||||
}
|
||||
}
|
||||
|
||||
func expectNoError(t *testing.T, err error, msg string) {
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %v", msg, err)
|
||||
}
|
||||
}
|
67
vendor/k8s.io/kubernetes/test/integration/master/crd_test.go
generated
vendored
@ -18,7 +18,6 @@ package master

import (
	"encoding/json"
	"fmt"
	"testing"
	"time"

@ -37,6 +36,7 @@ import (
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
	"k8s.io/kubernetes/test/integration/etcd"
	"k8s.io/kubernetes/test/integration/framework"
)

@ -81,12 +81,8 @@ func TestCRDShadowGroup(t *testing.T) {
			},
		},
	}
	if _, err = apiextensionsclient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd); err != nil {
		t.Fatalf("Failed to create networking group CRD: %v", err)
	}
	if err := waitForEstablishedCRD(apiextensionsclient, crd.Name); err != nil {
		t.Fatalf("Failed to establish networking group CRD: %v", err)
	}
	etcd.CreateTestCRDs(t, apiextensionsclient, true, crd)

	// wait to give aggregator time to update
	time.Sleep(2 * time.Second)

@ -97,11 +93,7 @@ func TestCRDShadowGroup(t *testing.T) {
	}

	t.Logf("Checking that crd resource does not show up in networking group")
	found, err := crdExistsInDiscovery(apiextensionsclient, crd)
	if err != nil {
		t.Fatalf("unexpected discovery error: %v", err)
	}
	if found {
	if etcd.CrdExistsInDiscovery(apiextensionsclient, crd) {
		t.Errorf("CRD resource shows up in discovery, but shouldn't.")
	}
}
@ -137,17 +129,7 @@ func TestCRD(t *testing.T) {
			},
		},
	}
	if _, err = apiextensionsclient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd); err != nil {
		t.Fatalf("Failed to create foos.cr.bar.com CRD; %v", err)
	}
	if err := waitForEstablishedCRD(apiextensionsclient, crd.Name); err != nil {
		t.Fatalf("Failed to establish foos.cr.bar.com CRD: %v", err)
	}
	if err := wait.PollImmediate(500*time.Millisecond, 30*time.Second, func() (bool, error) {
		return crdExistsInDiscovery(apiextensionsclient, crd)
	}); err != nil {
		t.Fatalf("Failed to see foos.cr.bar.com in discovery: %v", err)
	}
	etcd.CreateTestCRDs(t, apiextensionsclient, false, crd)

	t.Logf("Trying to access foos.cr.bar.com with dynamic client")
	dynamicClient, err := dynamic.NewForConfig(result.ClientConfig)
@ -199,7 +181,7 @@ func TestCRD(t *testing.T) {
	}
	createErr := make(chan error, 1)
	go func() {
		_, err := dynamicClient.Resource(fooResource).Namespace("default").Create(unstructuredFoo)
		_, err := dynamicClient.Resource(fooResource).Namespace("default").Create(unstructuredFoo, metav1.CreateOptions{})
		t.Logf("Foo instance create returned: %v", err)
		if err != nil {
			createErr <- err
@ -272,7 +254,7 @@ func TestCRD(t *testing.T) {
	}
	fooUnstructured.UnmarshalJSON(bs)

	_, err = dynamicClient.Resource(fooResource).Namespace("default").Update(fooUnstructured)
	_, err = dynamicClient.Resource(fooResource).Namespace("default").Update(fooUnstructured, metav1.UpdateOptions{})
	if err != nil && !errors.IsConflict(err) {
		t.Fatalf("Failed to update Foo instance: %v", err)
	} else if err == nil {
@ -306,38 +288,3 @@ func unstructuredFoo(foo *Foo) (*unstructured.Unstructured, error) {
	}
	return ret, nil
}

func waitForEstablishedCRD(client apiextensionsclientset.Interface, name string) error {
	return wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
		crd, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, cond := range crd.Status.Conditions {
			switch cond.Type {
			case apiextensionsv1beta1.Established:
				if cond.Status == apiextensionsv1beta1.ConditionTrue {
					return true, err
				}
			case apiextensionsv1beta1.NamesAccepted:
				if cond.Status == apiextensionsv1beta1.ConditionFalse {
					fmt.Printf("Name conflict: %v\n", cond.Reason)
				}
			}
		}
		return false, nil
	})
}

func crdExistsInDiscovery(client apiextensionsclientset.Interface, crd *apiextensionsv1beta1.CustomResourceDefinition) (bool, error) {
	resourceList, err := client.Discovery().ServerResourcesForGroupVersion(crd.Spec.Group + "/" + crd.Spec.Version)
	if err != nil {
		return false, nil
	}
	for _, resource := range resourceList.APIResources {
		if resource.Name == crd.Spec.Names.Plural {
			return true, nil
		}
	}
	return false, nil
}

25
vendor/k8s.io/kubernetes/test/integration/master/kms_plugin_mock.go
generated
vendored
@ -23,18 +23,16 @@ import (
	"encoding/base64"
	"fmt"
	"net"
	"os"

	"golang.org/x/sys/unix"
	"google.golang.org/grpc"

	"github.com/golang/glog"
	kmsapi "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1"
	"k8s.io/klog"
)

const (
	kmsAPIVersion = "v1beta1"
	sockFile      = "/tmp/kms-provider.sock"
	sockFile      = "@kms-provider.sock"
	unixProtocol  = "unix"
)

@ -49,15 +47,11 @@ type base64Plugin struct {
}

func NewBase64Plugin() (*base64Plugin, error) {
	if err := cleanSockFile(); err != nil {
		return nil, err
	}

	listener, err := net.Listen(unixProtocol, sockFile)
	if err != nil {
		return nil, fmt.Errorf("failed to listen on the unix socket, error: %v", err)
	}
	glog.Infof("Listening on %s", sockFile)
	klog.Infof("Listening on %s", sockFile)

	server := grpc.NewServer()

@ -75,7 +69,6 @@ func NewBase64Plugin() (*base64Plugin, error) {
func (s *base64Plugin) cleanUp() {
	s.grpcServer.Stop()
	s.listener.Close()
	cleanSockFile()
}

var testProviderAPIVersion = kmsAPIVersion
@ -85,7 +78,7 @@ func (s *base64Plugin) Version(ctx context.Context, request *kmsapi.VersionReque
}

func (s *base64Plugin) Decrypt(ctx context.Context, request *kmsapi.DecryptRequest) (*kmsapi.DecryptResponse, error) {
	glog.Infof("Received Decrypt Request for DEK: %s", string(request.Cipher))
	klog.Infof("Received Decrypt Request for DEK: %s", string(request.Cipher))

	buf := make([]byte, base64.StdEncoding.DecodedLen(len(request.Cipher)))
	n, err := base64.StdEncoding.Decode(buf, request.Cipher)
@ -97,7 +90,7 @@ func (s *base64Plugin) Decrypt(ctx context.Context, request *kmsapi.DecryptReque
}

func (s *base64Plugin) Encrypt(ctx context.Context, request *kmsapi.EncryptRequest) (*kmsapi.EncryptResponse, error) {
	glog.Infof("Received Encrypt Request for DEK: %x", request.Plain)
	klog.Infof("Received Encrypt Request for DEK: %x", request.Plain)
	s.encryptRequest <- request

	buf := make([]byte, base64.StdEncoding.EncodedLen(len(request.Plain)))
@ -105,11 +98,3 @@ func (s *base64Plugin) Encrypt(ctx context.Context, request *kmsapi.EncryptReque

	return &kmsapi.EncryptResponse{Cipher: buf}, nil
}

func cleanSockFile() error {
	err := unix.Unlink(sockFile)
	if err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("failed to delete the socket file, error: %v", err)
	}
	return nil
}

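The switch from /tmp/kms-provider.sock to @kms-provider.sock above is what makes cleanSockFile unnecessary: on Linux, Go's net package treats a unix address beginning with "@" as an abstract-namespace socket, which never creates a filesystem entry, so there is nothing to unlink between runs. A minimal runnable sketch of that property (names are illustrative, not from the patch):

package main

import (
	"fmt"
	"net"
	"os"
)

func main() {
	// "@" places the socket in the Linux abstract namespace: the kernel
	// keys it by name and no inode is created anywhere on disk.
	listener, err := net.Listen("unix", "@demo-kms.sock")
	if err != nil {
		fmt.Println("listen failed:", err)
		return
	}
	defer listener.Close()

	// A path-based socket would leave a file behind; the abstract one
	// does not, so a cleanSockFile-style unlink step has nothing to do.
	if _, err := os.Stat("demo-kms.sock"); os.IsNotExist(err) {
		fmt.Println("no socket file on disk, as expected")
	}
}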
8
vendor/k8s.io/kubernetes/test/integration/master/kms_transformation_test.go
generated
vendored
@ -39,8 +39,8 @@ const (
	dekKeySizeLen = 2

	kmsConfigYAML = `
kind: EncryptionConfig
apiVersion: v1
kind: EncryptionConfiguration
apiVersion: apiserver.config.k8s.io/v1
resources:
  - resources:
    - secrets
@ -48,7 +48,7 @@ resources:
    - kms:
        name: grpc-kms-provider
        cachesize: 1000
        endpoint: unix:///tmp/kms-provider.sock
        endpoint: unix:///@kms-provider.sock
`
)

@ -72,7 +72,7 @@ func (r rawDEKKEKSecret) getPayload() []byte {
	return r[r.getStartOfPayload():]
}

// TestKMSProvider is an integration test between KubAPI, ETCD and KMS Plugin
// TestKMSProvider is an integration test between KubeAPI, ETCD and KMS Plugin
// Concretely, this test verifies the following integration contracts:
// 1. Raw records in ETCD that were processed by KMS Provider should be prefixed with k8s:enc:kms:v1:grpc-kms-provider-name:
// 2. Data Encryption Key (DEK) should be generated by envelopeTransformer and passed to KMS gRPC Plugin

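Contract 1 above is mechanical to verify: a secret's raw value read straight out of etcd must start with the provider-specific envelope prefix. A hedged sketch of that check, with a stand-in byte slice where the real test would read from etcd (the prefix follows the k8s:enc:kms:v1:<provider-name>: format named in the comment):

package main

import (
	"bytes"
	"fmt"
)

// envelopePrefix assumes the grpc-kms-provider name configured in this test.
const envelopePrefix = "k8s:enc:kms:v1:grpc-kms-provider:"

func main() {
	// Stand-in for a raw etcd value; the real test fetches it with clientv3.
	rawRecord := []byte(envelopePrefix + "\x01\x02ciphertext")

	if !bytes.HasPrefix(rawRecord, []byte(envelopePrefix)) {
		fmt.Println("record was not enveloped by the KMS provider")
		return
	}
	fmt.Println("record carries the KMS envelope prefix")
}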
5
vendor/k8s.io/kubernetes/test/integration/master/kube_apiserver_test.go
generated
vendored
@ -19,6 +19,7 @@ package master
import (
	"encoding/json"
	"fmt"
	"net/http"
	"reflect"
	"strings"
	"testing"
@ -98,8 +99,8 @@ func TestOpenAPIDelegationChainPlumbing(t *testing.T) {
	result := kubeclient.RESTClient().Get().AbsPath("/swagger.json").Do()
	status := 0
	result.StatusCode(&status)
	if status != 200 {
		t.Fatalf("GET /swagger.json failed: expected status=%d, got=%d", 200, status)
	if status != http.StatusOK {
		t.Fatalf("GET /swagger.json failed: expected status=%d, got=%d", http.StatusOK, status)
	}

	raw, err := result.Raw()

20
vendor/k8s.io/kubernetes/test/integration/master/secrets_transformation_test.go
generated
vendored
@ -23,7 +23,7 @@ import (
	"fmt"
	"testing"

	"k8s.io/apiserver/pkg/server/options/encryptionconfig"
	apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"
	"k8s.io/apiserver/pkg/storage/value"
	aestransformer "k8s.io/apiserver/pkg/storage/value/encrypt/aes"
)
@ -33,8 +33,8 @@ const (
	aesCBCPrefix = "k8s:enc:aescbc:v1:key1:"

	aesGCMConfigYAML = `
kind: EncryptionConfig
apiVersion: v1
kind: EncryptionConfiguration
apiVersion: apiserver.config.k8s.io/v1
resources:
  - resources:
    - secrets
@ -46,8 +46,8 @@ resources:
`

	aesCBCConfigYAML = `
kind: EncryptionConfig
apiVersion: v1
kind: EncryptionConfiguration
apiVersion: apiserver.config.k8s.io/v1
resources:
  - resources:
    - secrets
@ -59,8 +59,8 @@ resources:
`

	identityConfigYAML = `
kind: EncryptionConfig
apiVersion: v1
kind: EncryptionConfiguration
apiVersion: apiserver.config.k8s.io/v1
resources:
  - resources:
    - secrets
@ -72,7 +72,7 @@ resources:
// TestSecretsShouldBeEnveloped is an integration test between KubeAPI and etcd that checks:
// 1. Secrets are encrypted on write
// 2. Secrets are decrypted on read
// when EncryptionConfig is passed to KubeAPI server.
// when EncryptionConfiguration is passed to KubeAPI server.
func TestSecretsShouldBeTransformed(t *testing.T) {
	var testCases = []struct {
		transformerConfigContent string
@ -128,7 +128,7 @@ func runBenchmark(b *testing.B, transformerConfig string) {
}

func unSealWithGCMTransformer(cipherText []byte, ctx value.Context,
	transformerConfig encryptionconfig.ProviderConfig) ([]byte, error) {
	transformerConfig apiserverconfigv1.ProviderConfiguration) ([]byte, error) {

	block, err := newAESCipher(transformerConfig.AESGCM.Keys[0].Secret)
	if err != nil {
@ -146,7 +146,7 @@ func unSealWithGCMTransformer(cipherText []byte, ctx value.Context,
}

func unSealWithCBCTransformer(cipherText []byte, ctx value.Context,
	transformerConfig encryptionconfig.ProviderConfig) ([]byte, error) {
	transformerConfig apiserverconfigv1.ProviderConfiguration) ([]byte, error) {

	block, err := newAESCipher(transformerConfig.AESCBC.Keys[0].Secret)
	if err != nil {

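The two contracts in the comment above reduce to one comparison: what the API server returns on read must be plaintext, while what sits in etcd must be enveloped under the provider prefix (aesCBCPrefix above) and must not contain that plaintext. A hedged sketch with stand-in values rather than a live cluster:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	const aesCBCPrefix = "k8s:enc:aescbc:v1:key1:" // as defined in the test above

	plaintext := []byte("s3cret")                        // what a client reads back
	rawRecord := []byte(aesCBCPrefix + "ciphertextblob") // what etcd actually stores

	// Encrypted on write: the stored record is enveloped and the plaintext
	// never appears verbatim inside it.
	if !bytes.HasPrefix(rawRecord, []byte(aesCBCPrefix)) || bytes.Contains(rawRecord, plaintext) {
		fmt.Println("secret appears to be stored unencrypted")
		return
	}
	fmt.Println("stored record is enveloped; reads through the API still see plaintext")
}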
6
vendor/k8s.io/kubernetes/test/integration/master/synthetic_master_test.go
generated
vendored
@ -30,7 +30,7 @@ import (
	"testing"
	"time"

	"github.com/ghodss/yaml"
	"sigs.k8s.io/yaml"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
@ -304,7 +304,7 @@ func TestObjectSizeResponses(t *testing.T) {
	const DeploymentTwoMegabyteSize = 1000000

	expectedMsgFor1MB := `etcdserver: request is too large`
	expectedMsgFor2MB := `rpc error: code = ResourceExhausted desc = grpc: trying to send message larger than max`
	expectedMsgFor2MB := `rpc error: code = ResourceExhausted desc = trying to send message larger than max`
	expectedMsgForLargeAnnotation := `metadata.annotations: Too long: must have at most 262144 characters`

	deployment1 := constructBody("a", DeploymentMegabyteSize, "labels", t) // >1 MB file
@ -862,7 +862,7 @@ func TestUpdateNodeObjects(t *testing.T) {
			Reason: "bar",
		},
	}
	case i%4 == 1:
	case i%4 == 2:
		lastCount = 0
		n.Status.Conditions = nil
	}

14
vendor/k8s.io/kubernetes/test/integration/master/transformation_testcase.go
generated
vendored
@ -28,12 +28,12 @@ import (
	"testing"

	"github.com/coreos/etcd/clientv3"
	"github.com/ghodss/yaml"
	"github.com/prometheus/client_golang/prometheus"
	"sigs.k8s.io/yaml"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apiserver/pkg/server/options/encryptionconfig"
	apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"
	"k8s.io/apiserver/pkg/storage/storagebackend"
	"k8s.io/apiserver/pkg/storage/value"
	"k8s.io/client-go/kubernetes"
@ -51,7 +51,7 @@ const (
	metricsPrefix = "apiserver_storage_"
)

type unSealSecret func(cipherText []byte, ctx value.Context, config encryptionconfig.ProviderConfig) ([]byte, error)
type unSealSecret func(cipherText []byte, ctx value.Context, config apiserverconfigv1.ProviderConfiguration) ([]byte, error)

type transformTest struct {
	logger kubeapiservertesting.Logger
@ -164,7 +164,7 @@ func (e *transformTest) getRawSecretFromETCD() ([]byte, error) {

func (e *transformTest) getEncryptionOptions() []string {
	if e.transformerConfig != "" {
		return []string{"--experimental-encryption-provider-config", path.Join(e.configDir, encryptionConfigFileName)}
		return []string{"--encryption-provider-config", path.Join(e.configDir, encryptionConfigFileName)}
	}

	return nil
@ -186,8 +186,8 @@ func (e *transformTest) createEncryptionConfig() (string, error) {
	return tempDir, nil
}

func (e *transformTest) getEncryptionConfig() (*encryptionconfig.ProviderConfig, error) {
	var config encryptionconfig.EncryptionConfig
func (e *transformTest) getEncryptionConfig() (*apiserverconfigv1.ProviderConfiguration, error) {
	var config apiserverconfigv1.EncryptionConfiguration
	err := yaml.Unmarshal([]byte(e.transformerConfig), &config)
	if err != nil {
		return nil, fmt.Errorf("failed to extract transformer key: %v", err)
@ -228,7 +228,7 @@ func (e *transformTest) createSecret(name, namespace string) (*corev1.Secret, er
}

func (e *transformTest) readRawRecordFromETCD(path string) (*clientv3.GetResponse, error) {
	etcdClient, err := integration.GetEtcdKVClient(e.kubeAPIServer.ServerOpts.Etcd.StorageConfig)
	_, etcdClient, err := integration.GetEtcdClients(e.kubeAPIServer.ServerOpts.Etcd.StorageConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create etcd client: %v", err)
	}

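getEncryptionConfig above relies on sigs.k8s.io/yaml (the new import path for github.com/ghodss/yaml), which converts YAML to JSON before unmarshalling, so plain json struct tags drive the decoding. A small sketch of that behavior against the new-style config header; the struct here is illustrative, not the real apiserverconfigv1 type:

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// header models only the two fields every encryption config file starts with.
type header struct {
	Kind       string `json:"kind"`       // note: json tags, not yaml tags,
	APIVersion string `json:"apiVersion"` // because decoding goes via JSON
}

func main() {
	doc := []byte("kind: EncryptionConfiguration\napiVersion: apiserver.config.k8s.io/v1\n")

	var h header
	if err := yaml.Unmarshal(doc, &h); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Printf("parsed %s (%s)\n", h.Kind, h.APIVersion)
}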
10
vendor/k8s.io/kubernetes/test/integration/metrics/BUILD
generated
vendored
@ -35,13 +35,13 @@ go_test(
    embed = [":go_default_library"],
    tags = ["integration"],
    deps = [
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/golang/protobuf/proto:go_default_library",
        "//vendor/github.com/prometheus/client_model/go:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

6
vendor/k8s.io/kubernetes/test/integration/metrics/metrics_test.go
generated
vendored
@ -30,9 +30,9 @@ import (
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/test/integration/framework"

	"github.com/golang/glog"
	"github.com/golang/protobuf/proto"
	prometheuspb "github.com/prometheus/client_model/go"
	"k8s.io/klog"
)

const scrapeRequestHeader = "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text"
@ -54,7 +54,7 @@ func scrapeMetrics(s *httptest.Server) ([]*prometheuspb.MetricFamily, error) {
		return nil, fmt.Errorf("Unable to contact metrics endpoint of master: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("Non-200 response trying to scrape metrics from master: %v", resp)
	}

@ -66,7 +66,7 @@ func scrapeMetrics(s *httptest.Server) ([]*prometheuspb.MetricFamily, error) {
		if err := proto.UnmarshalText(scanner.Text(), &metric); err != nil {
			return nil, fmt.Errorf("Failed to unmarshal line of metrics response: %v", err)
		}
		glog.V(4).Infof("Got metric %q", metric.GetName())
		klog.V(4).Infof("Got metric %q", metric.GetName())
		metrics = append(metrics, &metric)
	}
	return metrics, nil

10
vendor/k8s.io/kubernetes/test/integration/objectmeta/BUILD
generated
vendored
@ -14,13 +14,13 @@ go_test(
    ],
    tags = ["integration"],
    deps = [
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

2
vendor/k8s.io/kubernetes/test/integration/openshift/BUILD
generated
vendored
@ -15,8 +15,8 @@ go_test(
    tags = ["integration"],
    deps = [
        "//pkg/master:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server:go_default_library",
    ],
)

10
vendor/k8s.io/kubernetes/test/integration/pods/BUILD
generated
vendored
@ -14,13 +14,13 @@ go_test(
    ],
    tags = ["integration"],
    deps = [
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//test/integration:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

31
vendor/k8s.io/kubernetes/test/integration/quota/BUILD
generated
vendored
@ -14,28 +14,27 @@ go_test(
    ],
    tags = ["integration"],
    deps = [
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/client/informers/informers_generated/internalversion:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/controller/replication:go_default_library",
        "//pkg/controller/resourcequota:go_default_library",
        "//pkg/quota/generic:go_default_library",
        "//pkg/quota/install:go_default_library",
        "//pkg/quota/v1/generic:go_default_library",
        "//pkg/quota/v1/install:go_default_library",
        "//plugin/pkg/admission/resourcequota:go_default_library",
        "//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
        "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

32
vendor/k8s.io/kubernetes/test/integration/quota/quota_test.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package quota

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
@ -35,13 +36,12 @@ import (
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
	watchtools "k8s.io/client-go/tools/watch"
	"k8s.io/kubernetes/pkg/controller"
	replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
	resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota"
	"k8s.io/kubernetes/pkg/quota/generic"
	quotainstall "k8s.io/kubernetes/pkg/quota/install"
	"k8s.io/kubernetes/pkg/quota/v1/generic"
	quotainstall "k8s.io/kubernetes/pkg/quota/v1/install"
	"k8s.io/kubernetes/plugin/pkg/admission/resourcequota"
	resourcequotaapi "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota"
	"k8s.io/kubernetes/test/integration/framework"
@ -63,15 +63,14 @@ func TestQuota(t *testing.T) {

	admissionCh := make(chan struct{})
	clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
	internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
	config := &resourcequotaapi.Configuration{}
	admission, err := resourcequota.NewResourceQuota(config, 5, admissionCh)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	admission.SetInternalKubeClientSet(internalClientset)
	internalInformers := internalinformers.NewSharedInformerFactory(internalClientset, controller.NoResyncPeriodFunc())
	admission.SetInternalKubeInformerFactory(internalInformers)
	admission.SetExternalKubeClientSet(clientset)
	internalInformers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
	admission.SetExternalKubeInformerFactory(internalInformers)
	qca := quotainstall.NewQuotaConfigurationForAdmission()
	admission.SetQuotaConfiguration(qca)
	defer close(admissionCh)
@ -161,7 +160,9 @@ func waitForQuota(t *testing.T, quota *v1.ResourceQuota, clientset *clientset.Cl
		t.Fatalf("unexpected error: %v", err)
	}

	_, err = watch.Until(1*time.Minute, w, func(event watch.Event) (bool, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancel()
	_, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
		switch event.Type {
		case watch.Modified:
		default:
@ -218,7 +219,9 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
		t.Fatalf("unexpected error: %v", err)
	}

	_, err = watch.Until(3*time.Minute, w, func(event watch.Event) (bool, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
	defer cancel()
	_, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
		switch event.Type {
		case watch.Modified:
		default:
@ -251,7 +254,6 @@ func TestQuotaLimitedResourceDenial(t *testing.T) {

	admissionCh := make(chan struct{})
	clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
	internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})

	// stop creation of a pod resource unless there is a quota
	config := &resourcequotaapi.Configuration{
@ -267,9 +269,9 @@ func TestQuotaLimitedResourceDenial(t *testing.T) {
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	admission.SetInternalKubeClientSet(internalClientset)
	internalInformers := internalinformers.NewSharedInformerFactory(internalClientset, controller.NoResyncPeriodFunc())
	admission.SetInternalKubeInformerFactory(internalInformers)
	admission.SetExternalKubeClientSet(clientset)
	externalInformers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
	admission.SetExternalKubeInformerFactory(externalInformers)
	admission.SetQuotaConfiguration(qca)
	defer close(admissionCh)

@ -318,7 +320,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) {
	// Periodically run the quota controller sync to detect new resource types
	go resourceQuotaController.Sync(discoveryFunc, 30*time.Second, controllerCh)

	internalInformers.Start(controllerCh)
	externalInformers.Start(controllerCh)
	informers.Start(controllerCh)
	close(informersStarted)

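The repeated pattern in the hunks above swaps watch.Until's bare duration for a caller-owned context, so the deadline composes with cancellation and the watch helper no longer manages time itself. A hedged sketch of the new shape, assuming an already-open watch.Interface (the wrapper function is illustrative):

package watchdemo

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/watch"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForModified blocks until the watcher reports a Modified event or the
// one-minute deadline expires, mirroring the migrated calls above.
func waitForModified(w watch.Interface) error {
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancel()

	_, err := watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
		return event.Type == watch.Modified, nil
	})
	return err
}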
28
vendor/k8s.io/kubernetes/test/integration/replicaset/BUILD
generated
vendored
@ -17,22 +17,22 @@ go_test(
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/controller/replicaset:go_default_library",
        "//pkg/util/slice:go_default_library",
        "//staging/src/k8s.io/api/apps/v1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/util/retry:go_default_library",
        "//test/integration/framework:go_default_library",
        "//test/utils:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/util/retry:go_default_library",
    ],
)

34
vendor/k8s.io/kubernetes/test/integration/replicaset/replicaset_test.go
generated
vendored
@ -112,32 +112,6 @@ func newMatchingPod(podName, namespace string) *v1.Pod {
	}
}

// verifyRemainingObjects verifies if the number of the remaining replica
// sets and pods are rsNum and podNum. It returns error if the
// communication with the API server fails.
func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rsNum, podNum int) (bool, error) {
	rsClient := clientSet.ExtensionsV1beta1().ReplicaSets(namespace)
	podClient := clientSet.CoreV1().Pods(namespace)
	pods, err := podClient.List(metav1.ListOptions{})
	if err != nil {
		return false, fmt.Errorf("Failed to list pods: %v", err)
	}
	var ret = true
	if len(pods.Items) != podNum {
		ret = false
		t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items))
	}
	rss, err := rsClient.List(metav1.ListOptions{})
	if err != nil {
		return false, fmt.Errorf("Failed to list replica sets: %v", err)
	}
	if len(rss.Items) != rsNum {
		ret = false
		t.Logf("expect %d RSs, got %d RSs", rsNum, len(rss.Items))
	}
	return ret, nil
}

func rmSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replicaset.ReplicaSetController, informers.SharedInformerFactory, clientset.Interface) {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	_, s, closeFn := framework.RunAMaster(masterConfig)
@ -366,9 +340,7 @@ func testScalingUsingScaleSubresource(t *testing.T, c clientset.Interface, rs *a
	if err != nil {
		t.Fatalf("Failed to obtain rs %s: %v", rs.Name, err)
	}
	kind := "ReplicaSet"
	scaleClient := c.ExtensionsV1beta1().Scales(ns)
	scale, err := scaleClient.Get(kind, rs.Name)
	scale, err := c.AppsV1().ReplicaSets(ns).GetScale(rs.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to obtain scale subresource for rs %s: %v", rs.Name, err)
	}
@ -377,12 +349,12 @@ func testScalingUsingScaleSubresource(t *testing.T, c clientset.Interface, rs *a
	}

	if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		scale, err := scaleClient.Get(kind, rs.Name)
		scale, err := c.AppsV1().ReplicaSets(ns).GetScale(rs.Name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		scale.Spec.Replicas = replicas
		_, err = scaleClient.Update(kind, scale)
		_, err = c.AppsV1().ReplicaSets(ns).UpdateScale(rs.Name, scale)
		return err
	}); err != nil {
		t.Fatalf("Failed to set .Spec.Replicas of scale subresource for rs %s: %v", rs.Name, err)

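The scale hunks above retire the polymorphic ExtensionsV1beta1().Scales(ns) client in favor of typed per-resource subresource calls. A compact sketch of that flow wrapped in the same conflict retry the test uses (the wrapper function is illustrative):

package scaledemo

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// resizeReplicaSet updates spec.replicas through the typed scale subresource,
// retrying when a concurrent writer causes a conflict.
func resizeReplicaSet(c kubernetes.Interface, ns, name string, replicas int32) error {
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		scale, err := c.AppsV1().ReplicaSets(ns).GetScale(name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		scale.Spec.Replicas = replicas
		_, err = c.AppsV1().ReplicaSets(ns).UpdateScale(name, scale)
		return err
	})
}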
24
vendor/k8s.io/kubernetes/test/integration/replicationcontroller/BUILD
generated
vendored
@ -16,19 +16,19 @@ go_test(
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/controller/replication:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/util/retry:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/util/retry:go_default_library",
    ],
)

@ -105,32 +105,6 @@ func newMatchingPod(podName, namespace string) *v1.Pod {
	}
}

// verifyRemainingObjects verifies if the number of the remaining replication
// controllers and pods are rcNum and podNum. It returns error if the
// communication with the API server fails.
func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rcNum, podNum int) (bool, error) {
	rcClient := clientSet.CoreV1().ReplicationControllers(namespace)
	podClient := clientSet.CoreV1().Pods(namespace)
	pods, err := podClient.List(metav1.ListOptions{})
	if err != nil {
		return false, fmt.Errorf("Failed to list pods: %v", err)
	}
	var ret = true
	if len(pods.Items) != podNum {
		ret = false
		t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items))
	}
	rcs, err := rcClient.List(metav1.ListOptions{})
	if err != nil {
		return false, fmt.Errorf("Failed to list replication controllers: %v", err)
	}
	if len(rcs.Items) != rcNum {
		ret = false
		t.Logf("expect %d RCs, got %d RCs", rcNum, len(rcs.Items))
	}
	return ret, nil
}

func rmSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replication.ReplicationManager, informers.SharedInformerFactory, clientset.Interface) {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	_, s, closeFn := framework.RunAMaster(masterConfig)
@ -366,9 +340,7 @@ func testScalingUsingScaleSubresource(t *testing.T, c clientset.Interface, rc *v
	if err != nil {
		t.Fatalf("Failed to obtain rc %s: %v", rc.Name, err)
	}
	kind := "ReplicationController"
	scaleClient := c.ExtensionsV1beta1().Scales(ns)
	scale, err := scaleClient.Get(kind, rc.Name)
	scale, err := c.CoreV1().ReplicationControllers(ns).GetScale(rc.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to obtain scale subresource for rc %s: %v", rc.Name, err)
	}
@ -377,12 +349,12 @@ func testScalingUsingScaleSubresource(t *testing.T, c clientset.Interface, rc *v
	}

	if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		scale, err := scaleClient.Get(kind, rc.Name)
		scale, err := c.CoreV1().ReplicationControllers(ns).GetScale(rc.Name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		scale.Spec.Replicas = replicas
		_, err = scaleClient.Update(kind, scale)
		_, err = c.CoreV1().ReplicationControllers(ns).UpdateScale(rc.Name, scale)
		return err
	}); err != nil {
		t.Fatalf("Failed to set .Spec.Replicas of scale subresource for rc %s: %v", rc.Name, err)

12
vendor/k8s.io/kubernetes/test/integration/scale/BUILD
generated
vendored
@ -12,15 +12,15 @@ go_test(
    tags = ["integration"],
    deps = [
        "//cmd/kube-apiserver/app/testing:go_default_library",
        "//staging/src/k8s.io/api/apps/v1beta2:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library",
        "//vendor/github.com/coreos/pkg/capnslog:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta2:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ],
)

92
vendor/k8s.io/kubernetes/test/integration/scheduler/BUILD
generated
vendored
@ -25,47 +25,46 @@ go_test(
        "//cmd/kube-scheduler/app:go_default_library",
        "//cmd/kube-scheduler/app/config:go_default_library",
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/apis/componentconfig:go_default_library",
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/client/informers/informers_generated/internalversion:go_default_library",
        "//pkg/controller/nodelifecycle:go_default_library",
        "//pkg/controller/volume/persistentvolume:go_default_library",
        "//pkg/controller/volume/persistentvolume/options:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubeapiserver/admission:go_default_library",
        "//pkg/scheduler:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/algorithmprovider:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//pkg/scheduler/apis/config:go_default_library",
        "//pkg/scheduler/cache:go_default_library",
        "//pkg/scheduler/factory:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//plugin/pkg/admission/podtolerationrestriction:go_default_library",
        "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
        "//test/integration/framework:go_default_library",
        "//test/utils:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/api/storage/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

@ -96,26 +95,27 @@ go_library(
        "//pkg/scheduler/algorithmprovider:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//pkg/scheduler/factory:go_default_library",
        "//pkg/util/taints:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
        "//test/integration/framework:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

2
vendor/k8s.io/kubernetes/test/integration/scheduler/extender_test.go
generated
vendored
@ -349,7 +349,7 @@ func TestSchedulerExtender(t *testing.T) {
	}
	policy.APIVersion = "v1"

	context = initTestScheduler(t, context, nil, false, &policy)
	context = initTestScheduler(t, context, false, &policy)
	defer cleanupTest(t, context)

	DoTestPodScheduling(context.ns, t, clientSet)

30
vendor/k8s.io/kubernetes/test/integration/scheduler/preemption_test.go
generated
vendored
@ -36,7 +36,7 @@ import (
	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
	testutils "k8s.io/kubernetes/test/utils"

	"github.com/golang/glog"
	"k8s.io/klog"
)

var lowPriority, mediumPriority, highPriority = int32(100), int32(200), int32(300)
@ -52,7 +52,7 @@ func waitForNominatedNodeNameWithTimeout(cs clientset.Interface, pod *v1.Pod, ti
		}
		return false, err
	}); err != nil {
		return fmt.Errorf("Pod %v annotation did not get set: %v", pod.Name, err)
		return fmt.Errorf("Pod %v/%v annotation did not get set: %v", pod.Namespace, pod.Name, err)
	}
	return nil
}
@ -268,7 +268,7 @@ func TestPreemption(t *testing.T) {
	for i, p := range pods {
		if _, found := test.preemptedPodIndexes[i]; found {
			if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
				t.Errorf("Test [%v]: Pod %v is not getting evicted.", test.description, p.Name)
				t.Errorf("Test [%v]: Pod %v/%v is not getting evicted.", test.description, p.Namespace, p.Name)
			}
		} else {
			if p.DeletionTimestamp != nil {
@ -450,7 +450,7 @@ func TestPreemptionStarvation(t *testing.T) {
	// make sure that runningPods are all scheduled.
	for _, p := range runningPods {
		if err := waitForPodToSchedule(cs, p); err != nil {
			t.Fatalf("Pod %v didn't get scheduled: %v", p.Name, err)
			t.Fatalf("Pod %v/%v didn't get scheduled: %v", p.Namespace, p.Name, err)
		}
	}
	// Create pending pods.
@ -464,7 +464,7 @@ func TestPreemptionStarvation(t *testing.T) {
	for _, p := range pendingPods {
		if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout,
			podUnschedulable(cs, p.Namespace, p.Name)); err != nil {
			t.Errorf("Pod %v didn't get marked unschedulable: %v", p.Name, err)
			t.Errorf("Pod %v/%v didn't get marked unschedulable: %v", p.Namespace, p.Name, err)
		}
	}
	// Create the preemptor.
@ -474,14 +474,14 @@ func TestPreemptionStarvation(t *testing.T) {
	}
	// Check that the preemptor pod gets the annotation for nominated node name.
	if err := waitForNominatedNodeName(cs, preemptor); err != nil {
		t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v: %v", test.description, preemptor.Name, err)
		t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v/%v: %v", test.description, preemptor.Namespace, preemptor.Name, err)
	}
	// Make sure that preemptor is scheduled after preemptions.
	if err := waitForPodToScheduleWithTimeout(cs, preemptor, 60*time.Second); err != nil {
		t.Errorf("Preemptor pod %v didn't get scheduled: %v", preemptor.Name, err)
	}
	// Cleanup
	glog.Info("Cleaning up all pods...")
	klog.Info("Cleaning up all pods...")
	allPods := pendingPods
	allPods = append(allPods, runningPods...)
	allPods = append(allPods, preemptor)
@ -532,7 +532,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
	// make sure that the pods are all scheduled.
	for _, p := range lowPriPods {
		if err := waitForPodToSchedule(cs, p); err != nil {
			t.Fatalf("Pod %v didn't get scheduled: %v", p.Name, err)
			t.Fatalf("Pod %v/%v didn't get scheduled: %v", p.Namespace, p.Name, err)
		}
	}
	// Step 2. Create a medium priority pod.
@ -551,7 +551,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
	}
	// Step 3. Check that nominated node name of the medium priority pod is set.
	if err := waitForNominatedNodeName(cs, medPriPod); err != nil {
		t.Errorf("NominatedNodeName annotation was not set for pod %v: %v", medPriPod.Name, err)
		t.Errorf("NominatedNodeName annotation was not set for pod %v/%v: %v", medPriPod.Namespace, medPriPod.Name, err)
|
||||
}
|
||||
// Step 4. Create a high priority pod.
|
||||
podConf = initPausePod(cs, &pausePodConfig{
|
||||
@ -569,7 +569,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
|
||||
}
|
||||
// Step 5. Check that nominated node name of the high priority pod is set.
|
||||
if err := waitForNominatedNodeName(cs, highPriPod); err != nil {
|
||||
t.Errorf("NominatedNodeName annotation was not set for pod %v: %v", medPriPod.Name, err)
|
||||
t.Errorf("NominatedNodeName annotation was not set for pod %v/%v: %v", medPriPod.Namespace, medPriPod.Name, err)
|
||||
}
|
||||
// And the nominated node name of the medium priority pod is cleared.
|
||||
if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
|
||||
@ -842,8 +842,8 @@ func TestPDBInPreemption(t *testing.T) {
|
||||
t.Fatalf("Failed to create PDB: %v", err)
|
||||
}
|
||||
}
|
||||
// Wait for PDBs to show up in the scheduler's cache and become stable.
|
||||
if err := waitCachedPDBsStable(context, test.pdbs, test.pdbPodNum); err != nil {
|
||||
// Wait for PDBs to become stable.
|
||||
if err := waitForPDBsStable(context, test.pdbs, test.pdbPodNum); err != nil {
|
||||
t.Fatalf("Not all pdbs are stable in the cache: %v", err)
|
||||
}
|
||||
|
||||
@ -856,18 +856,18 @@ func TestPDBInPreemption(t *testing.T) {
|
||||
for i, p := range pods {
|
||||
if _, found := test.preemptedPodIndexes[i]; found {
|
||||
if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
|
||||
t.Errorf("Test [%v]: Pod %v is not getting evicted.", test.description, p.Name)
|
||||
t.Errorf("Test [%v]: Pod %v/%v is not getting evicted.", test.description, p.Namespace, p.Name)
|
||||
}
|
||||
} else {
|
||||
if p.DeletionTimestamp != nil {
|
||||
t.Errorf("Test [%v]: Didn't expect pod %v to get preempted.", test.description, p.Name)
|
||||
t.Errorf("Test [%v]: Didn't expect pod %v/%v to get preempted.", test.description, p.Namespace, p.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Also check that the preemptor pod gets the annotation for nominated node name.
|
||||
if len(test.preemptedPodIndexes) > 0 {
|
||||
if err := waitForNominatedNodeName(cs, preemptor); err != nil {
|
||||
t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v: %v", test.description, preemptor.Name, err)
|
||||
t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v/%v: %v", test.description, preemptor.Namespace, preemptor.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
62
vendor/k8s.io/kubernetes/test/integration/scheduler/priorities_test.go
generated
vendored
@ -22,6 +22,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
testutils "k8s.io/kubernetes/test/utils"
"strings"
)

// This file tests the scheduler priority functions.
@ -172,3 +173,64 @@ func TestPodAffinity(t *testing.T) {
}
t.Errorf("Pod %v got scheduled on an unexpected node: %v.", podName, pod.Spec.NodeName)
}

// TestImageLocality verifies that the scheduler's image locality priority function
// works correctly, i.e., the pod gets scheduled to the node where its container images are ready.
func TestImageLocality(t *testing.T) {
context := initTest(t, "image-locality")
defer cleanupTest(t, context)

// Add a few nodes.
_, err := createNodes(context.clientSet, "testnode", nil, 10)
if err != nil {
t.Fatalf("cannot create nodes: %v", err)
}

// We use a fake large image as the test image used by the pod, which has relatively large image size.
image := v1.ContainerImage{
Names: []string{
"fake-large-image:v1",
},
SizeBytes: 3000 * 1024 * 1024,
}

// Create a node with the large image
nodeWithLargeImage, err := createNodeWithImages(context.clientSet, "testnode-large-image", nil, []v1.ContainerImage{image})
if err != nil {
t.Fatalf("cannot create node with a large image: %v", err)
}

// Create a pod with containers each having the specified image.
podName := "pod-using-large-image"
pod, err := runPodWithContainers(context.clientSet, initPodWithContainers(context.clientSet, &podWithContainersConfig{
Name: podName,
Namespace: context.ns.Name,
Containers: makeContainersWithImages(image.Names),
}))
if err != nil {
t.Fatalf("error running pod with images: %v", err)
}
if pod.Spec.NodeName != nodeWithLargeImage.Name {
t.Errorf("pod %v got scheduled on an unexpected node: %v. Expected node: %v.", podName, pod.Spec.NodeName, nodeWithLargeImage.Name)
} else {
t.Logf("pod %v got successfully scheduled on node %v.", podName, pod.Spec.NodeName)
}
}

// makeContainersWithImages returns a list of v1.Container objects for each given image. Duplicates of an image are ignored,
// i.e., each image is used only once.
func makeContainersWithImages(images []string) []v1.Container {
var containers []v1.Container
usedImages := make(map[string]struct{})

for _, image := range images {
if _, ok := usedImages[image]; !ok {
containers = append(containers, v1.Container{
Name: strings.Replace(image, ":", "-", -1) + "-container",
Image: image,
})
usedImages[image] = struct{}{}
}
}
return containers
}
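
As a quick sanity check on the helper above, a hypothetical in-package unit test (the test name and image strings are illustrative, not part of the commit) could assert both the de-duplication and the name mangling:

func TestMakeContainersWithImages(t *testing.T) {
	containers := makeContainersWithImages([]string{
		"fake-large-image:v1",
		"fake-large-image:v1", // duplicate, should be ignored
		"busybox:1.29",
	})
	if len(containers) != 2 {
		t.Fatalf("expected 2 containers, got %d", len(containers))
	}
	// ":" is replaced by "-" when deriving the container name.
	if containers[0].Name != "fake-large-image-v1-container" {
		t.Errorf("unexpected container name: %q", containers[0].Name)
	}
}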
131
vendor/k8s.io/kubernetes/test/integration/scheduler/scheduler_test.go
generated
vendored
@ -20,19 +20,13 @@ package scheduler

import (
"fmt"
"reflect"
"testing"
"time"

"k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
@ -45,11 +39,11 @@ import (
schedulerapp "k8s.io/kubernetes/cmd/kube-scheduler/app"
schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
"k8s.io/kubernetes/pkg/scheduler/factory"
"k8s.io/kubernetes/test/integration/framework"
@ -140,6 +134,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
"GeneralPredicates",
"MatchInterPodAffinity",
"MaxAzureDiskVolumeCount",
"MaxCSIVolumeCountPred",
"MaxEBSVolumeCount",
"MaxGCEPDVolumeCount",
"NoDiskConflict",
@ -154,6 +149,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
"NodePreferAvoidPodsPriority",
"SelectorSpreadPriority",
"TaintTolerationPriority",
"ImageLocalityPriority",
),
},
{
@ -173,7 +169,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
configPolicyName := fmt.Sprintf("scheduler-custom-policy-config-%d", i)
policyConfigMap := v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: configPolicyName},
Data: map[string]string{componentconfig.SchedulerPolicyConfigMapKey: test.policy},
Data: map[string]string{kubeschedulerconfig.SchedulerPolicyConfigMapKey: test.policy},
}

policyConfigMap.APIVersion = "v1"
@ -182,18 +178,20 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})

defaultBindTimeout := int64(30)
ss := &schedulerappconfig.Config{
ComponentConfig: componentconfig.KubeSchedulerConfiguration{
ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
SchedulerName: v1.DefaultSchedulerName,
AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
Policy: &componentconfig.SchedulerPolicySource{
ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{
Policy: &kubeschedulerconfig.SchedulerPolicySource{
ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
Namespace: policyConfigMap.Namespace,
Name: policyConfigMap.Name,
},
},
},
BindTimeoutSeconds: &defaultBindTimeout,
},
Client: clientSet,
InformerFactory: informerFactory,
@ -243,18 +241,20 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})

defaultBindTimeout := int64(30)
ss := &schedulerappconfig.Config{
ComponentConfig: componentconfig.KubeSchedulerConfiguration{
ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
SchedulerName: v1.DefaultSchedulerName,
AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
Policy: &componentconfig.SchedulerPolicySource{
ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{
Policy: &kubeschedulerconfig.SchedulerPolicySource{
ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
Namespace: "non-existent-config",
Name: "non-existent-config",
},
},
},
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
BindTimeoutSeconds: &defaultBindTimeout,
},
Client: clientSet,
InformerFactory: informerFactory,
@ -522,7 +522,10 @@ func TestMultiScheduler(t *testing.T) {
informerFactory2 := informers.NewSharedInformerFactory(context.clientSet, 0)
podInformer2 := factory.NewPodInformer(context.clientSet, 0)

schedulerConfigFactory2 := createConfiguratorWithPodInformer(fooScheduler, clientSet2, podInformer2, informerFactory2)
stopCh := make(chan struct{})
defer close(stopCh)

schedulerConfigFactory2 := createConfiguratorWithPodInformer(fooScheduler, clientSet2, podInformer2, informerFactory2, stopCh)
schedulerConfig2, err := schedulerConfigFactory2.Create()
if err != nil {
t.Errorf("Couldn't create scheduler config: %v", err)
@ -530,12 +533,11 @@ func TestMultiScheduler(t *testing.T) {
eventBroadcaster2 := record.NewBroadcaster()
schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: fooScheduler})
eventBroadcaster2.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet2.CoreV1().Events("")})
go podInformer2.Informer().Run(schedulerConfig2.StopEverything)
informerFactory2.Start(schedulerConfig2.StopEverything)
go podInformer2.Informer().Run(stopCh)
informerFactory2.Start(stopCh)

sched2, _ := scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: schedulerConfig2}, nil...)
sched2.Run()
defer close(schedulerConfig2.StopEverything)

// 6. **check point-2**:
// - testPodWithAnnotationFitsFoo should be scheduled
@ -665,95 +667,6 @@ func TestAllocatable(t *testing.T) {
}
}

// TestPDBCache verifies that scheduler cache works as expected when handling
// PodDisruptionBudget.
func TestPDBCache(t *testing.T) {
context := initTest(t, "pdbcache")
defer cleanupTest(t, context)

intstrMin := intstr.FromInt(4)
pdb := &policy.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Namespace: context.ns.Name,
Name: "test-pdb",
UID: types.UID("test-pdb-uid"),
Labels: map[string]string{"tkey1": "tval1", "tkey2": "tval2"},
},
Spec: policy.PodDisruptionBudgetSpec{
MinAvailable: &intstrMin,
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"tkey": "tvalue"}},
},
}

createdPDB, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Create(pdb)
if err != nil {
t.Errorf("Failed to create PDB: %v", err)
}
// Wait for PDB to show up in the scheduler's cache.
if err = wait.Poll(time.Second, 15*time.Second, func() (bool, error) {
cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
if err != nil {
t.Errorf("Error while polling for PDB: %v", err)
return false, err
}
return len(cachedPDBs) > 0, err
}); err != nil {
t.Fatalf("No PDB was added to the cache: %v", err)
}
// Read PDB from the cache and compare it.
cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
if len(cachedPDBs) != 1 {
t.Fatalf("Expected to have 1 pdb in cache, but found %d.", len(cachedPDBs))
}
if !reflect.DeepEqual(createdPDB, cachedPDBs[0]) {
t.Errorf("Got different PDB than expected.\nDifference detected on:\n%s", diff.ObjectReflectDiff(createdPDB, cachedPDBs[0]))
}

// Update PDB and change its labels.
pdbCopy := *cachedPDBs[0]
pdbCopy.Labels = map[string]string{}
updatedPDB, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Update(&pdbCopy)
if err != nil {
t.Errorf("Failed to update PDB: %v", err)
}
// Wait for PDB to be updated in the scheduler's cache.
if err = wait.Poll(time.Second, 15*time.Second, func() (bool, error) {
cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
if err != nil {
t.Errorf("Error while polling for PDB: %v", err)
return false, err
}
return len(cachedPDBs[0].Labels) == 0, err
}); err != nil {
t.Fatalf("No PDB was updated in the cache: %v", err)
}
// Read PDB from the cache and compare it.
cachedPDBs, err = context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
if len(cachedPDBs) != 1 {
t.Errorf("Expected to have 1 pdb in cache, but found %d.", len(cachedPDBs))
}
if !reflect.DeepEqual(updatedPDB, cachedPDBs[0]) {
t.Errorf("Got different PDB than expected.\nDifference detected on:\n%s", diff.ObjectReflectDiff(updatedPDB, cachedPDBs[0]))
}

// Delete PDB.
err = context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Delete(pdb.Name, &metav1.DeleteOptions{})
if err != nil {
t.Errorf("Failed to delete PDB: %v", err)
}
// Wait for PDB to be deleted from the scheduler's cache.
if err = wait.Poll(time.Second, 15*time.Second, func() (bool, error) {
cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
if err != nil {
t.Errorf("Error while polling for PDB: %v", err)
return false, err
}
return len(cachedPDBs) == 0, err
}); err != nil {
t.Errorf("No PDB was deleted from the cache: %v", err)
}
}

// TestSchedulerInformers tests that scheduler receives informer events and updates its cache when
// pods are scheduled by other schedulers.
func TestSchedulerInformers(t *testing.T) {
739
vendor/k8s.io/kubernetes/test/integration/scheduler/taint_test.go
generated
vendored
@ -19,7 +19,7 @@ package scheduler
// This file tests the Taint feature.

import (
"reflect"
"fmt"
"testing"
"time"

@ -28,22 +28,38 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
"k8s.io/kubernetes/pkg/controller/nodelifecycle"
kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction"
pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
)

// TestTaintNodeByCondition verifies:
// 1. MemoryPressure Toleration is added to non-BestEffort Pod by PodTolerationRestriction
// 2. NodeController taints nodes by node condition
// 3. Scheduler allows pod to tolerate node condition taints, e.g. network unavailable
func newPod(nsName, name string, req, limit v1.ResourceList) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: nsName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "busybox",
Image: "busybox",
Resources: v1.ResourceRequirements{
Requests: req,
Limits: limit,
},
},
},
},
}
}

// TestTaintNodeByCondition tests related cases for TaintNodeByCondition feature.
func TestTaintNodeByCondition(t *testing.T) {
enabled := utilfeature.DefaultFeatureGate.Enabled("TaintNodesByCondition")
defer func() {
@ -60,34 +76,32 @@ func TestTaintNodeByCondition(t *testing.T) {
context := initTestMaster(t, "default", admission)

// Build clientset and informers for controllers.
internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{
externalClientset := kubernetes.NewForConfigOrDie(&restclient.Config{
QPS: -1,
Host: context.httpServer.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
internalInformers := internalinformers.NewSharedInformerFactory(internalClientset, time.Second)
externalInformers := informers.NewSharedInformerFactory(externalClientset, time.Second)

kubeadmission.WantsInternalKubeClientSet(admission).SetInternalKubeClientSet(internalClientset)
kubeadmission.WantsInternalKubeInformerFactory(admission).SetInternalKubeInformerFactory(internalInformers)

controllerCh := make(chan struct{})
defer close(controllerCh)
admission.SetExternalKubeClientSet(externalClientset)
admission.SetExternalKubeInformerFactory(externalInformers)

// Apply feature gates to enable TaintNodesByCondition
algorithmprovider.ApplyFeatureGates()

context = initTestScheduler(t, context, controllerCh, false, nil)
clientset := context.clientSet
context = initTestScheduler(t, context, false, nil)
cs := context.clientSet
informers := context.informerFactory
nsName := context.ns.Name

// Start NodeLifecycleController for taint.
nc, err := nodelifecycle.NewNodeLifecycleController(
informers.Coordination().V1beta1().Leases(),
informers.Core().V1().Pods(),
informers.Core().V1().Nodes(),
informers.Extensions().V1beta1().DaemonSets(),
nil, // CloudProvider
clientset,
time.Second, // Node monitor grace period
cs,
time.Hour, // Node monitor grace period
time.Second, // Node startup grace period
time.Second, // Node monitor period
time.Second, // Pod eviction timeout
@ -103,92 +117,380 @@ func TestTaintNodeByCondition(t *testing.T) {
t.Errorf("Failed to create node controller: %v", err)
return
}
go nc.Run(controllerCh)
go nc.Run(context.stopCh)

// Waiting for all controller sync.
internalInformers.Start(controllerCh)
internalInformers.WaitForCacheSync(controllerCh)
externalInformers.Start(context.stopCh)
externalInformers.WaitForCacheSync(context.stopCh)
informers.Start(context.stopCh)
informers.WaitForCacheSync(context.stopCh)

// -------------------------------------------
// Test TaintNodeByCondition feature.
// -------------------------------------------
memoryPressureToleration := v1.Toleration{
Key: algorithm.TaintNodeMemoryPressure,
nodeRes := v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4000m"),
v1.ResourceMemory: resource.MustParse("16Gi"),
v1.ResourcePods: resource.MustParse("110"),
}

podRes := v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
}

notReadyToleration := v1.Toleration{
Key: schedulerapi.TaintNodeNotReady,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}

// Case 1: Add MemoryPressure Toleration for non-BestEffort pod.
burstablePod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "burstable-pod",
Namespace: nsName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
unreachableToleration := v1.Toleration{
Key: schedulerapi.TaintNodeUnreachable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}

unschedulableToleration := v1.Toleration{
Key: schedulerapi.TaintNodeUnschedulable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}

outOfDiskToleration := v1.Toleration{
Key: schedulerapi.TaintNodeOutOfDisk,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}

memoryPressureToleration := v1.Toleration{
Key: schedulerapi.TaintNodeMemoryPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}

diskPressureToleration := v1.Toleration{
Key: schedulerapi.TaintNodeDiskPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}

networkUnavailableToleration := v1.Toleration{
Key: schedulerapi.TaintNodeNetworkUnavailable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}

pidPressureToleration := v1.Toleration{
Key: schedulerapi.TaintNodePIDPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}

bestEffortPod := newPod(nsName, "besteffort-pod", nil, nil)
burstablePod := newPod(nsName, "burstable-pod", podRes, nil)
guaranteePod := newPod(nsName, "guarantee-pod", podRes, podRes)

type podCase struct {
pod *v1.Pod
tolerations []v1.Toleration
fits bool
}

// switch to table driven tests
tests := []struct {
name string
existingTaints []v1.Taint
nodeConditions []v1.NodeCondition
unschedulable bool
expectedTaints []v1.Taint
pods []podCase
}{
{
name: "not-ready node",
nodeConditions: []v1.NodeCondition{
{
Name: "busybox",
Image: "busybox",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
},
Type: v1.NodeReady,
Status: v1.ConditionFalse,
},
},
expectedTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodeNotReady,
Effect: v1.TaintEffectNoSchedule,
},
},
pods: []podCase{
{
pod: bestEffortPod,
fits: false,
},
{
pod: burstablePod,
fits: false,
},
{
pod: guaranteePod,
fits: false,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{notReadyToleration},
fits: true,
},
},
},
{
name: "unreachable node",
existingTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodeUnreachable,
Effect: v1.TaintEffectNoSchedule,
},
},
nodeConditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown, // node status is "Unknown"
},
},
expectedTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodeUnreachable,
Effect: v1.TaintEffectNoSchedule,
},
},
pods: []podCase{
{
pod: bestEffortPod,
fits: false,
},
{
pod: burstablePod,
fits: false,
},
{
pod: guaranteePod,
fits: false,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{unreachableToleration},
fits: true,
},
},
},
{
name: "unschedulable node",
unschedulable: true, // node.spec.unschedulable = true
nodeConditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
},
},
expectedTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodeUnschedulable,
Effect: v1.TaintEffectNoSchedule,
},
},
pods: []podCase{
{
pod: bestEffortPod,
fits: false,
},
{
pod: burstablePod,
fits: false,
},
{
pod: guaranteePod,
fits: false,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{unschedulableToleration},
fits: true,
},
},
},
{
name: "out of disk node",
nodeConditions: []v1.NodeCondition{
{
Type: v1.NodeOutOfDisk,
Status: v1.ConditionTrue,
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
},
},
expectedTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodeOutOfDisk,
Effect: v1.TaintEffectNoSchedule,
},
},
// In OutOfDisk condition, only pods with toleration can be scheduled.
pods: []podCase{
{
pod: bestEffortPod,
fits: false,
},
{
pod: burstablePod,
fits: false,
},
{
pod: guaranteePod,
fits: false,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{outOfDiskToleration},
fits: true,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{diskPressureToleration},
fits: false,
},
},
},
{
name: "memory pressure node",
nodeConditions: []v1.NodeCondition{
{
Type: v1.NodeMemoryPressure,
Status: v1.ConditionTrue,
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
},
},
expectedTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodeMemoryPressure,
Effect: v1.TaintEffectNoSchedule,
},
},
// In MemoryPressure condition, both Burstable and Guarantee pods are scheduled;
// BestEffort pods with toleration are also scheduled.
pods: []podCase{
{
pod: bestEffortPod,
fits: false,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{memoryPressureToleration},
fits: true,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{diskPressureToleration},
fits: false,
},
{
pod: burstablePod,
fits: true,
},
{
pod: guaranteePod,
fits: true,
},
},
},
{
name: "disk pressure node",
nodeConditions: []v1.NodeCondition{
{
Type: v1.NodeDiskPressure,
Status: v1.ConditionTrue,
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
},
},
expectedTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodeDiskPressure,
Effect: v1.TaintEffectNoSchedule,
},
},
// In DiskPressure condition, only pods with toleration can be scheduled.
pods: []podCase{
{
pod: bestEffortPod,
fits: false,
},
{
pod: burstablePod,
fits: false,
},
{
pod: guaranteePod,
fits: false,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{diskPressureToleration},
fits: true,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{memoryPressureToleration},
fits: false,
},
},
},
{
name: "network unavailable and node is ready",
nodeConditions: []v1.NodeCondition{
{
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionTrue,
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
},
},
expectedTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodeNetworkUnavailable,
Effect: v1.TaintEffectNoSchedule,
},
},
pods: []podCase{
{
pod: bestEffortPod,
fits: false,
},
{
pod: burstablePod,
fits: false,
},
{
pod: guaranteePod,
fits: false,
},
{
pod: burstablePod,
tolerations: []v1.Toleration{
networkUnavailableToleration,
},
fits: true,
},
},
},
}

burstablePodInServ, err := clientset.CoreV1().Pods(nsName).Create(burstablePod)
if err != nil {
t.Errorf("Case 1: Failed to create pod: %v", err)
} else if !reflect.DeepEqual(burstablePodInServ.Spec.Tolerations, []v1.Toleration{memoryPressureToleration}) {
t.Errorf("Case 1: Unexpected toleration of non-BestEffort pod, expected: %+v, got: %v",
[]v1.Toleration{memoryPressureToleration},
burstablePodInServ.Spec.Tolerations)
}

// Case 2: No MemoryPressure Toleration for BestEffort pod.
besteffortPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "best-effort-pod",
Namespace: nsName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "busybox",
Image: "busybox",
},
},
},
}

besteffortPodInServ, err := clientset.CoreV1().Pods(nsName).Create(besteffortPod)
if err != nil {
t.Errorf("Case 2: Failed to create pod: %v", err)
} else if len(besteffortPodInServ.Spec.Tolerations) != 0 {
t.Errorf("Case 2: Unexpected toleration # of BestEffort pod, expected: 0, got: %v",
len(besteffortPodInServ.Spec.Tolerations))
}

// Case 3: Taint Node by NetworkUnavailable condition.
networkUnavailableNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node-1",
},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4000m"),
v1.ResourceMemory: resource.MustParse("16Gi"),
v1.ResourcePods: resource.MustParse("110"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4000m"),
v1.ResourceMemory: resource.MustParse("16Gi"),
v1.ResourcePods: resource.MustParse("110"),
},
Conditions: []v1.NodeCondition{
{
name: "network unavailable and node is not ready",
nodeConditions: []v1.NodeCondition{
{
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionTrue,
@ -198,116 +500,175 @@ func TestTaintNodeByCondition(t *testing.T) {
Status: v1.ConditionFalse,
},
},
},
}

nodeInformerCh := make(chan bool)
nodeInformer := informers.Core().V1().Nodes().Informer()
nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(old, cur interface{}) {
curNode := cur.(*v1.Node)
if curNode.Name != "node-1" {
return
}
for _, taint := range curNode.Spec.Taints {
if taint.Key == algorithm.TaintNodeNetworkUnavailable &&
taint.Effect == v1.TaintEffectNoSchedule {
nodeInformerCh <- true
break
}
}
},
})

if _, err := clientset.CoreV1().Nodes().Create(networkUnavailableNode); err != nil {
t.Errorf("Case 3: Failed to create node: %v", err)
} else {
select {
case <-time.After(60 * time.Second):
t.Errorf("Case 3: Failed to taint node after 60s.")
case <-nodeInformerCh:
}
}

// Case 4: Schedule Pod with NetworkUnavailable toleration.
networkDaemonPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "network-daemon-pod",
Namespace: nsName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
expectedTaints: []v1.Taint{
{
Name: "busybox",
Image: "busybox",
Key: schedulerapi.TaintNodeNetworkUnavailable,
Effect: v1.TaintEffectNoSchedule,
},
{
Key: schedulerapi.TaintNodeNotReady,
Effect: v1.TaintEffectNoSchedule,
},
},
Tolerations: []v1.Toleration{
pods: []podCase{
{
Key: algorithm.TaintNodeNetworkUnavailable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
pod: bestEffortPod,
fits: false,
},
{
pod: burstablePod,
fits: false,
},
{
pod: guaranteePod,
fits: false,
},
{
pod: burstablePod,
tolerations: []v1.Toleration{
networkUnavailableToleration,
},
fits: false,
},
{
pod: burstablePod,
tolerations: []v1.Toleration{
networkUnavailableToleration,
notReadyToleration,
},
fits: true,
},
},
},
{
name: "pid pressure node",
nodeConditions: []v1.NodeCondition{
{
Type: v1.NodePIDPressure,
Status: v1.ConditionTrue,
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
},
},
expectedTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodePIDPressure,
Effect: v1.TaintEffectNoSchedule,
},
},
pods: []podCase{
{
pod: bestEffortPod,
fits: false,
},
{
pod: burstablePod,
fits: false,
},
{
pod: guaranteePod,
fits: false,
},
{
pod: bestEffortPod,
tolerations: []v1.Toleration{pidPressureToleration},
fits: true,
},
},
},
{
name: "multi taints on node",
nodeConditions: []v1.NodeCondition{
{
Type: v1.NodePIDPressure,
Status: v1.ConditionTrue,
},
{
Type: v1.NodeMemoryPressure,
Status: v1.ConditionTrue,
},
{
Type: v1.NodeDiskPressure,
Status: v1.ConditionTrue,
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
},
},
expectedTaints: []v1.Taint{
{
Key: schedulerapi.TaintNodeDiskPressure,
Effect: v1.TaintEffectNoSchedule,
},
{
Key: schedulerapi.TaintNodeMemoryPressure,
Effect: v1.TaintEffectNoSchedule,
},
{
Key: schedulerapi.TaintNodePIDPressure,
Effect: v1.TaintEffectNoSchedule,
},
},
},
}

if _, err := clientset.CoreV1().Pods(nsName).Create(networkDaemonPod); err != nil {
t.Errorf("Case 4: Failed to create pod for network daemon: %v", err)
} else {
if err := waitForPodToScheduleWithTimeout(clientset, networkDaemonPod, time.Second*60); err != nil {
t.Errorf("Case 4: Failed to schedule network daemon pod in 60s.")
}
}

// Case 5: Taint node by unschedulable condition
unschedulableNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node-2",
},
Spec: v1.NodeSpec{
Unschedulable: true,
},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4000m"),
v1.ResourceMemory: resource.MustParse("16Gi"),
v1.ResourcePods: resource.MustParse("110"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4000m"),
v1.ResourceMemory: resource.MustParse("16Gi"),
v1.ResourcePods: resource.MustParse("110"),
},
},
}

nodeInformerCh2 := make(chan bool)
nodeInformer2 := informers.Core().V1().Nodes().Informer()
nodeInformer2.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(old, cur interface{}) {
curNode := cur.(*v1.Node)
if curNode.Name != "node-2" {
return
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node-1",
},
Spec: v1.NodeSpec{
Unschedulable: test.unschedulable,
Taints: test.existingTaints,
},
Status: v1.NodeStatus{
Capacity: nodeRes,
Allocatable: nodeRes,
Conditions: test.nodeConditions,
},
}

for _, taint := range curNode.Spec.Taints {
if taint.Key == algorithm.TaintNodeUnschedulable &&
taint.Effect == v1.TaintEffectNoSchedule {
nodeInformerCh2 <- true
break
if _, err := cs.CoreV1().Nodes().Create(node); err != nil {
t.Errorf("Failed to create node, err: %v", err)
}
if err := waitForNodeTaints(cs, node, test.expectedTaints); err != nil {
t.Errorf("Failed to taint node <%s>, err: %v", node.Name, err)
}

var pods []*v1.Pod
for i, p := range test.pods {
pod := p.pod.DeepCopy()
pod.Name = fmt.Sprintf("%s-%d", pod.Name, i)
pod.Spec.Tolerations = p.tolerations

createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod)
if err != nil {
t.Fatalf("Failed to create pod %s/%s, error: %v",
pod.Namespace, pod.Name, err)
}

pods = append(pods, createdPod)

if p.fits {
if err := waitForPodToSchedule(cs, createdPod); err != nil {
t.Errorf("Failed to schedule pod %s/%s on the node, err: %v",
pod.Namespace, pod.Name, err)
}
} else {
if err := waitForPodUnschedulable(cs, createdPod); err != nil {
t.Errorf("Unschedulable pod %s/%s gets scheduled on the node, err: %v",
pod.Namespace, pod.Name, err)
}
}
}
},
})

if _, err := clientset.CoreV1().Nodes().Create(unschedulableNode); err != nil {
t.Errorf("Case 5: Failed to create node: %v", err)
} else {
select {
case <-time.After(60 * time.Second):
t.Errorf("Case 5: Failed to taint node after 60s.")
case <-nodeInformerCh2:
}
cleanupPods(cs, t, pods)
cleanupNodes(cs, t)
waitForSchedulerCacheCleanup(context.scheduler, t)
})
}
}
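
For anyone extending this table-driven test, a new condition/taint pairing is covered by appending one more entry to the tests slice above. A hypothetical sketch (the case name is illustrative, and it reuses the burstablePod fixture and diskPressureToleration defined earlier in the test):

{
	name: "disk pressure node tolerated by burstable pod",
	nodeConditions: []v1.NodeCondition{
		{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue},
		{Type: v1.NodeReady, Status: v1.ConditionTrue},
	},
	expectedTaints: []v1.Taint{
		{Key: schedulerapi.TaintNodeDiskPressure, Effect: v1.TaintEffectNoSchedule},
	},
	pods: []podCase{
		{pod: burstablePod, tolerations: []v1.Toleration{diskPressureToleration}, fits: true},
	},
},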
198
vendor/k8s.io/kubernetes/test/integration/scheduler/util.go
generated
vendored
@ -51,6 +51,7 @@ import (
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/pkg/scheduler/factory"
taintutils "k8s.io/kubernetes/pkg/util/taints"
"k8s.io/kubernetes/test/integration/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -61,9 +62,10 @@ type TestContext struct {
ns *v1.Namespace
clientSet *clientset.Clientset
informerFactory informers.SharedInformerFactory
schedulerConfigFactory scheduler.Configurator
schedulerConfig *scheduler.Config
schedulerConfigFactory factory.Configurator
schedulerConfig *factory.Config
scheduler *scheduler.Scheduler
stopCh chan struct{}
}

// createConfiguratorWithPodInformer creates a configurator for scheduler.
@ -72,30 +74,36 @@ func createConfiguratorWithPodInformer(
clientSet clientset.Interface,
podInformer coreinformers.PodInformer,
informerFactory informers.SharedInformerFactory,
) scheduler.Configurator {
return factory.NewConfigFactory(
schedulerName,
clientSet,
informerFactory.Core().V1().Nodes(),
podInformer,
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
informerFactory.Storage().V1().StorageClasses(),
v1.DefaultHardPodAffinitySymmetricWeight,
utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache),
false,
)
stopCh <-chan struct{},
) factory.Configurator {
return factory.NewConfigFactory(&factory.ConfigFactoryArgs{
SchedulerName: schedulerName,
Client: clientSet,
NodeInformer: informerFactory.Core().V1().Nodes(),
PodInformer: podInformer,
PvInformer: informerFactory.Core().V1().PersistentVolumes(),
PvcInformer: informerFactory.Core().V1().PersistentVolumeClaims(),
ReplicationControllerInformer: informerFactory.Core().V1().ReplicationControllers(),
ReplicaSetInformer: informerFactory.Apps().V1().ReplicaSets(),
StatefulSetInformer: informerFactory.Apps().V1().StatefulSets(),
ServiceInformer: informerFactory.Core().V1().Services(),
PdbInformer: informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
StorageClassInformer: informerFactory.Storage().V1().StorageClasses(),
HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
EnableEquivalenceClassCache: utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache),
DisablePreemption: false,
PercentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
BindTimeoutSeconds: 600,
StopCh: stopCh,
})
}

// initTestMaster initializes a test environment and creates a master with default
// configuration.
func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface) *TestContext {
var context TestContext
context := TestContext{
stopCh: make(chan struct{}),
}

// 1. Create master
h := &framework.MasterHolder{Initialized: make(chan struct{})}
@ -135,13 +143,12 @@ func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface
func initTestScheduler(
t *testing.T,
context *TestContext,
controllerCh chan struct{},
setPodInformer bool,
policy *schedulerapi.Policy,
) *TestContext {
// Pod preemption is enabled by default scheduler configuration, but preemption only happens when PodPriority
// feature gate is enabled at the same time.
return initTestSchedulerWithOptions(t, context, controllerCh, setPodInformer, policy, false)
return initTestSchedulerWithOptions(t, context, setPodInformer, policy, false, true, time.Second)
}

// initTestSchedulerWithOptions initializes a test environment and creates a scheduler with default
@ -149,19 +156,21 @@ func initTestScheduler(
func initTestSchedulerWithOptions(
t *testing.T,
context *TestContext,
controllerCh chan struct{},
setPodInformer bool,
policy *schedulerapi.Policy,
disablePreemption bool,
disableEquivalenceCache bool,
resyncPeriod time.Duration,
) *TestContext {
// Enable EnableEquivalenceClassCache for all integration tests.
defer utilfeaturetesting.SetFeatureGateDuringTest(
t,
utilfeature.DefaultFeatureGate,
features.EnableEquivalenceClassCache, true)()
if !disableEquivalenceCache {
defer utilfeaturetesting.SetFeatureGateDuringTest(
t,
utilfeature.DefaultFeatureGate,
features.EnableEquivalenceClassCache, true)()
}

// 1. Create scheduler
context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, time.Second)
context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, resyncPeriod)

var podInformer coreinformers.PodInformer

@ -173,7 +182,7 @@ func initTestSchedulerWithOptions(
}

context.schedulerConfigFactory = createConfiguratorWithPodInformer(
v1.DefaultSchedulerName, context.clientSet, podInformer, context.informerFactory)
v1.DefaultSchedulerName, context.clientSet, podInformer, context.informerFactory, context.stopCh)

var err error

@ -187,11 +196,6 @@ func initTestSchedulerWithOptions(
t.Fatalf("Couldn't create scheduler config: %v", err)
}

// set controllerCh if provided.
if controllerCh != nil {
context.schedulerConfig.StopEverything = controllerCh
}

// set DisablePreemption option
context.schedulerConfig.DisablePreemption = disablePreemption

@ -246,21 +250,21 @@ func initDisruptionController(context *TestContext) *disruption.DisruptionContro
// initTest initializes a test environment and creates master and scheduler with default
// configuration.
func initTest(t *testing.T, nsPrefix string) *TestContext {
return initTestScheduler(t, initTestMaster(t, nsPrefix, nil), nil, true, nil)
return initTestScheduler(t, initTestMaster(t, nsPrefix, nil), true, nil)
}

// initTestDisablePreemption initializes a test environment and creates master and scheduler with default
// configuration but with pod preemption disabled.
func initTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext {
return initTestSchedulerWithOptions(
t, initTestMaster(t, nsPrefix, nil), nil, true, nil, true)
t, initTestMaster(t, nsPrefix, nil), true, nil, true, true, time.Second)
}

// cleanupTest deletes the scheduler and the test namespace. It should be called
// at the end of a test.
func cleanupTest(t *testing.T, context *TestContext) {
// Kill the scheduler.
close(context.schedulerConfig.StopEverything)
close(context.stopCh)
// Cleanup nodes.
context.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
framework.DeleteTestingNamespace(context.ns, context.httpServer, t)
@ -322,24 +326,35 @@ func waitForNodeLabels(cs clientset.Interface, nodeName string, labels map[strin
return wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, nodeHasLabels(cs, nodeName, labels))
}

// createNode creates a node with the given resource list and
// returns a pointer and error status. If 'res' is nil, a predefined amount of
// initNode returns a node with the given resource list and images. If 'res' is nil, a predefined amount of
// resource will be used.
func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.Node, error) {
func initNode(name string, res *v1.ResourceList, images []v1.ContainerImage) *v1.Node {
// if resource is nil, we use a default amount of resources for the node.
if res == nil {
res = &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
}
}

n := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.NodeSpec{Unschedulable: false},
Status: v1.NodeStatus{
Capacity: *res,
Images: images,
},
}
return cs.CoreV1().Nodes().Create(n)
return n
}

// createNode creates a node with the given resource list.
func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.Node, error) {
return cs.CoreV1().Nodes().Create(initNode(name, res, nil))
}

// createNodeWithImages creates a node with the given resource list and images.
func createNodeWithImages(cs clientset.Interface, name string, res *v1.ResourceList, images []v1.ContainerImage) (*v1.Node, error) {
return cs.CoreV1().Nodes().Create(initNode(name, res, images))
}

// updateNodeStatus updates the status of node.
@ -363,6 +378,44 @@ func createNodes(cs clientset.Interface, prefix string, res *v1.ResourceList, nu
return nodes[:], nil
}

// nodeTainted returns a condition function that returns true if the given node contains
// the taints.
func nodeTainted(cs clientset.Interface, nodeName string, taints []v1.Taint) wait.ConditionFunc {
return func() (bool, error) {
node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}

if len(taints) != len(node.Spec.Taints) {
return false, nil
}

for _, taint := range taints {
if !taintutils.TaintExists(node.Spec.Taints, &taint) {
return false, nil
}
}

return true, nil
}
}

// waitForNodeTaints waits for a node to have the target taints and returns
// an error if it does not have taints within the given timeout.
func waitForNodeTaints(cs clientset.Interface, node *v1.Node, taints []v1.Taint) error {
return wait.Poll(100*time.Millisecond, 30*time.Second, nodeTainted(cs, node.Name, taints))
}
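
A brief usage sketch for the two helpers above, assuming it runs inside a test in this package with cs clientset.Interface and t *testing.T in scope (the node name and taint are illustrative):

node, err := createNode(cs, "taint-demo-node", nil)
if err != nil {
	t.Fatalf("failed to create node: %v", err)
}
// Block until the node carries exactly the expected taint set; nodeTainted
// compares lengths first, so extra taints also fail the poll.
expected := []v1.Taint{
	{Key: schedulerapi.TaintNodeNotReady, Effect: v1.TaintEffectNoSchedule},
}
if err := waitForNodeTaints(cs, node, expected); err != nil {
	t.Errorf("node %s did not get taints %v: %v", node.Name, expected, err)
}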

// cleanupNodes deletes all nodes.
func cleanupNodes(cs clientset.Interface, t *testing.T) {
err := cs.CoreV1().Nodes().DeleteCollection(
metav1.NewDeleteOptions(0), metav1.ListOptions{})
if err != nil {
t.Errorf("error while deleting all nodes: %v", err)
}
}

type pausePodConfig struct {
Name string
Namespace string
@ -442,6 +495,43 @@ func runPausePod(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) {
if err != nil {
return nil, fmt.Errorf("Error creating pause pod: %v", err)
}
if err = waitForPodToSchedule(cs, pod); err != nil {
return pod, fmt.Errorf("Pod %v/%v didn't schedule successfully. Error: %v", pod.Namespace, pod.Name, err)
}
if pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil {
return pod, fmt.Errorf("Error getting pod %v/%v info: %v", pod.Namespace, pod.Name, err)
}
return pod, nil
}

type podWithContainersConfig struct {
Name string
Namespace string
Containers []v1.Container
}

// initPodWithContainers initializes a pod API object from the given config. This is used primarily for generating
// pods with containers each having a specific image.
func initPodWithContainers(cs clientset.Interface, conf *podWithContainersConfig) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: conf.Name,
Namespace: conf.Namespace,
},
Spec: v1.PodSpec{
Containers: conf.Containers,
},
}
return pod
}

// runPodWithContainers creates a pod with given config and containers and waits
// until it is scheduled. It returns its pointer and error status.
func runPodWithContainers(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) {
pod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod)
if err != nil {
return nil, fmt.Errorf("Error creating pod-with-containers: %v", err)
}
if err = waitForPodToSchedule(cs, pod); err != nil {
return pod, fmt.Errorf("Pod %v didn't schedule successfully. Error: %v", pod.Name, err)
}
@ -539,20 +629,20 @@ func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {
return waitForPodUnschedulableWithTimeout(cs, pod, 30*time.Second)
}

// waitCachedPDBsStable waits for PDBs in scheduler cache to have "CurrentHealthy" status equal to
// waitForPDBsStable waits for PDBs to have "CurrentHealthy" status equal to
// the expected values.
func waitCachedPDBsStable(context *TestContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
func waitForPDBsStable(context *TestContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
return wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
pdbList, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).List(metav1.ListOptions{})
if err != nil {
return false, err
}
if len(cachedPDBs) != len(pdbs) {
if len(pdbList.Items) != len(pdbs) {
return false, nil
}
for i, pdb := range pdbs {
found := false
for _, cpdb := range cachedPDBs {
for _, cpdb := range pdbList.Items {
if pdb.Name == cpdb.Name && pdb.Namespace == cpdb.Namespace {
found = true
if cpdb.Status.CurrentHealthy != pdbPodNum[i] {
@ -638,3 +728,15 @@ func cleanupPodsInNamespace(cs clientset.Interface, t *testing.T, ns string) {
t.Errorf("error while waiting for pods in namespace %v: %v", ns, err)
}
}

func waitForSchedulerCacheCleanup(sched *scheduler.Scheduler, t *testing.T) {
schedulerCacheIsEmpty := func() (bool, error) {
snapshot := sched.Cache().Snapshot()

return len(snapshot.Nodes) == 0 && len(snapshot.AssumedPods) == 0, nil
}

if err := wait.Poll(time.Second, wait.ForeverTestTimeout, schedulerCacheIsEmpty); err != nil {
t.Errorf("Failed to wait for scheduler cache cleanup: %v", err)
}
}
|
||||
|
858
vendor/k8s.io/kubernetes/test/integration/scheduler/volume_binding_test.go
generated
vendored
File diff suppressed because it is too large
20
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/BUILD
generated
vendored
@ -14,12 +14,12 @@ go_library(
    ],
    importpath = "k8s.io/kubernetes/test/integration/scheduler_perf",
    deps = [
        "//pkg/scheduler:go_default_library",
        "//pkg/scheduler/algorithmprovider:go_default_library",
        "//pkg/scheduler/factory:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//test/integration/util:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

@ -35,14 +35,14 @@ go_test(
    tags = ["integration"],
    deps = [
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/scheduler:go_default_library",
        "//pkg/scheduler/factory:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//test/integration/framework:go_default_library",
        "//test/utils:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)
16
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/OWNERS
generated
vendored
@ -1,16 +1,4 @@
approvers:
- bsalamat
- davidopp
- gmarek
- jayunit100
- timothysc
- wojtek-t
- sig-scheduling-maintainers
reviewers:
- bsalamat
- davidopp
- jayunit100
- k82cn
- ravisantoshgudimetla
- sjug
- timothysc
- wojtek-t
- sig-scheduling
138
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_bench_test.go
generated
vendored
@ -28,7 +28,11 @@ import (
    "k8s.io/kubernetes/test/integration/framework"
    testutils "k8s.io/kubernetes/test/utils"

    "github.com/golang/glog"
    "k8s.io/klog"
)

var (
    defaultNodeStrategy = &testutils.TrivialNodePrepareStrategy{}
)

// BenchmarkScheduling benchmarks the scheduling rate when the cluster has
@ -45,41 +49,90 @@ func BenchmarkScheduling(b *testing.B) {
    for _, test := range tests {
        name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
        b.Run(name, func(b *testing.B) {
            benchmarkScheduling(test.nodes, test.existingPods, test.minPods, setupStrategy, testStrategy, b)
            benchmarkScheduling(test.nodes, test.existingPods, test.minPods, defaultNodeStrategy, setupStrategy, testStrategy, b)
        })
    }
}

// BenchmarkSchedulingAntiAffinity benchmarks the scheduling rate of pods with
// BenchmarkSchedulingPodAntiAffinity benchmarks the scheduling rate of pods with
// PodAntiAffinity rules when the cluster has various quantities of nodes and
// scheduled pods.
func BenchmarkSchedulingAntiAffinity(b *testing.B) {
func BenchmarkSchedulingPodAntiAffinity(b *testing.B) {
    tests := []struct{ nodes, existingPods, minPods int }{
        {nodes: 500, existingPods: 250, minPods: 250},
        {nodes: 500, existingPods: 5000, minPods: 250},
        {nodes: 1000, existingPods: 1000, minPods: 500},
    }
    // The setup strategy creates pods with no affinity rules.
    setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("setup")
    // The test strategy creates pods with anti-affinity for each other.
    testBasePod := makeBasePodWithAntiAffinity(
    testBasePod := makeBasePodWithPodAntiAffinity(
        map[string]string{"name": "test", "color": "green"},
        map[string]string{"color": "green"})
    // The test strategy creates pods with anti-affinity for each other.
    testStrategy := testutils.NewCustomCreatePodStrategy(testBasePod)
    for _, test := range tests {
        name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
        b.Run(name, func(b *testing.B) {
            benchmarkScheduling(test.nodes, test.existingPods, test.minPods, setupStrategy, testStrategy, b)
            benchmarkScheduling(test.nodes, test.existingPods, test.minPods, defaultNodeStrategy, setupStrategy, testStrategy, b)
        })
    }

}

// makeBasePodWithAntiAffinity creates a Pod object to be used as a template.
// BenchmarkSchedulingPodAffinity benchmarks the scheduling rate of pods with
// PodAffinity rules when the cluster has various quantities of nodes and
// scheduled pods.
func BenchmarkSchedulingPodAffinity(b *testing.B) {
    tests := []struct{ nodes, existingPods, minPods int }{
        {nodes: 500, existingPods: 250, minPods: 250},
        {nodes: 500, existingPods: 5000, minPods: 250},
        {nodes: 1000, existingPods: 1000, minPods: 500},
    }
    // The setup strategy creates pods with no affinity rules.
    setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("setup")
    testBasePod := makeBasePodWithPodAffinity(
        map[string]string{"foo": ""},
        map[string]string{"foo": ""},
    )
    // The test strategy creates pods with affinity for each other.
    testStrategy := testutils.NewCustomCreatePodStrategy(testBasePod)
    nodeStrategy := testutils.NewLabelNodePrepareStrategy(apis.LabelZoneFailureDomain, "zone1")
    for _, test := range tests {
        name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
        b.Run(name, func(b *testing.B) {
            benchmarkScheduling(test.nodes, test.existingPods, test.minPods, nodeStrategy, setupStrategy, testStrategy, b)
        })
    }
}

// BenchmarkSchedulingNodeAffinity benchmarks the scheduling rate of pods with
// NodeAffinity rules when the cluster has various quantities of nodes and
// scheduled pods.
func BenchmarkSchedulingNodeAffinity(b *testing.B) {
    tests := []struct{ nodes, existingPods, minPods int }{
        {nodes: 500, existingPods: 250, minPods: 250},
        {nodes: 500, existingPods: 5000, minPods: 250},
        {nodes: 1000, existingPods: 1000, minPods: 500},
    }
    // The setup strategy creates pods with no affinity rules.
    setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("setup")
    testBasePod := makeBasePodWithNodeAffinity(apis.LabelZoneFailureDomain, []string{"zone1", "zone2"})
    // The test strategy creates pods with node-affinity for each other.
    testStrategy := testutils.NewCustomCreatePodStrategy(testBasePod)
    nodeStrategy := testutils.NewLabelNodePrepareStrategy(apis.LabelZoneFailureDomain, "zone1")
    for _, test := range tests {
        name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
        b.Run(name, func(b *testing.B) {
            benchmarkScheduling(test.nodes, test.existingPods, test.minPods, nodeStrategy, setupStrategy, testStrategy, b)
        })
    }
}

// makeBasePodWithPodAntiAffinity creates a Pod object to be used as a template.
// The Pod has a PodAntiAffinity requirement against pods with the given labels.
func makeBasePodWithAntiAffinity(podLabels, affinityLabels map[string]string) *v1.Pod {
func makeBasePodWithPodAntiAffinity(podLabels, affinityLabels map[string]string) *v1.Pod {
    basePod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: "affinity-pod-",
            GenerateName: "anit-affinity-pod-",
            Labels:       podLabels,
        },
        Spec: testutils.MakePodSpec(),
@ -99,11 +152,66 @@ func makeBasePodWithAntiAffinity(podLabels, affinityLabels map[string]string) *v
    return basePod
}

// makeBasePodWithPodAffinity creates a Pod object to be used as a template.
// The Pod has a PodAffinity requirement against pods with the given labels.
func makeBasePodWithPodAffinity(podLabels, affinityZoneLabels map[string]string) *v1.Pod {
    basePod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: "affinity-pod-",
            Labels:       podLabels,
        },
        Spec: testutils.MakePodSpec(),
    }
    basePod.Spec.Affinity = &v1.Affinity{
        PodAffinity: &v1.PodAffinity{
            RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
                {
                    LabelSelector: &metav1.LabelSelector{
                        MatchLabels: affinityZoneLabels,
                    },
                    TopologyKey: apis.LabelZoneFailureDomain,
                },
            },
        },
    }
    return basePod
}

// makeBasePodWithNodeAffinity creates a Pod object to be used as a template.
// The Pod has a NodeAffinity requirement against nodes with the given expressions.
func makeBasePodWithNodeAffinity(key string, vals []string) *v1.Pod {
    basePod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: "node-affinity-",
        },
        Spec: testutils.MakePodSpec(),
    }
    basePod.Spec.Affinity = &v1.Affinity{
        NodeAffinity: &v1.NodeAffinity{
            RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
                NodeSelectorTerms: []v1.NodeSelectorTerm{
                    {
                        MatchExpressions: []v1.NodeSelectorRequirement{
                            {
                                Key:      key,
                                Operator: v1.NodeSelectorOpIn,
                                Values:   vals,
                            },
                        },
                    },
                },
            },
        },
    }
    return basePod
}

// benchmarkScheduling benchmarks scheduling rate with specific number of nodes
// and specific number of pods already scheduled.
// This will schedule numExistingPods pods before the benchmark starts, and at
// least minPods pods during the benchmark.
func benchmarkScheduling(numNodes, numExistingPods, minPods int,
    nodeStrategy testutils.PrepareNodeStrategy,
    setupPodStrategy, testPodStrategy testutils.TestPodCreateStrategy,
    b *testing.B) {
    if b.N < minPods {
@ -115,11 +223,11 @@ func benchmarkScheduling(numNodes, numExistingPods, minPods int,

    nodePreparer := framework.NewIntegrationTestNodePreparer(
        c,
        []testutils.CountToStrategy{{Count: numNodes, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
        []testutils.CountToStrategy{{Count: numNodes, Strategy: nodeStrategy}},
        "scheduler-perf-",
    )
    if err := nodePreparer.PrepareNodes(); err != nil {
        glog.Fatalf("%v", err)
        klog.Fatalf("%v", err)
    }
    defer nodePreparer.CleanupNodes()

@ -131,7 +239,7 @@ func benchmarkScheduling(numNodes, numExistingPods, minPods int,
    for {
        scheduled, err := schedulerConfigFactory.GetScheduledPodLister().List(labels.Everything())
        if err != nil {
            glog.Fatalf("%v", err)
            klog.Fatalf("%v", err)
        }
        if len(scheduled) >= numExistingPods {
            break
@ -149,7 +257,7 @@ func benchmarkScheduling(numNodes, numExistingPods, minPods int,
        // TODO: Setup watch on apiserver and wait until all pods scheduled.
        scheduled, err := schedulerConfigFactory.GetScheduledPodLister().List(labels.Everything())
        if err != nil {
            glog.Fatalf("%v", err)
            klog.Fatalf("%v", err)
        }
        if len(scheduled) >= numExistingPods+b.N {
            break
16
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_test.go
generated
vendored
@ -18,12 +18,12 @@ package benchmark

import (
    "fmt"
    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/kubernetes/pkg/scheduler"
    "k8s.io/klog"
    "k8s.io/kubernetes/pkg/scheduler/factory"
    testutils "k8s.io/kubernetes/test/utils"
    "math"
    "strconv"
@ -105,7 +105,7 @@ type testConfig struct {
    numNodes                  int
    mutatedNodeTemplate       *v1.Node
    mutatedPodTemplate        *v1.Pod
    schedulerSupportFunctions scheduler.Configurator
    schedulerSupportFunctions factory.Configurator
    destroyFunc               func()
}

@ -137,7 +137,7 @@ func schedulePods(config *testConfig) int32 {
    time.Sleep(50 * time.Millisecond)
    scheduled, err := config.schedulerSupportFunctions.GetScheduledPodLister().List(labels.Everything())
    if err != nil {
        glog.Fatalf("%v", err)
        klog.Fatalf("%v", err)
    }
    // 30,000 pods -> wait till @ least 300 are scheduled to start measuring.
    // TODO Find out why sometimes there may be scheduling blips in the beginning.
@ -155,15 +155,19 @@ func schedulePods(config *testConfig) int32 {
        // TODO: Setup watch on apiserver and wait until all pods scheduled.
        scheduled, err := config.schedulerSupportFunctions.GetScheduledPodLister().List(labels.Everything())
        if err != nil {
            glog.Fatalf("%v", err)
            klog.Fatalf("%v", err)
        }

        // We will be completed when all pods are done being scheduled.
        // return the worst-case-scenario interval that was seen during this time.
        // Note this should never be low due to cold-start, so allow bake in sched time if necessary.
        if len(scheduled) >= config.numPods {
            consumed := int(time.Since(start) / time.Second)
            if consumed <= 0 {
                consumed = 1
            }
            fmt.Printf("Scheduled %v Pods in %v seconds (%v per second on average). min QPS was %v\n",
                config.numPods, int(time.Since(start)/time.Second), config.numPods/int(time.Since(start)/time.Second), minQps)
                config.numPods, consumed, config.numPods/consumed, minQps)
            return minQps
        }
4
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/util.go
generated
vendored
@ -20,8 +20,8 @@ import (
    "k8s.io/apimachinery/pkg/runtime/schema"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/kubernetes/pkg/scheduler"
    _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
    "k8s.io/kubernetes/pkg/scheduler/factory"
    "k8s.io/kubernetes/test/integration/util"
)

@ -32,7 +32,7 @@ import (
// remove resources after finished.
// Notes on rate limiter:
// - client rate limit is set to 5000.
func mustSetupScheduler() (scheduler.Configurator, util.ShutdownFunc) {
func mustSetupScheduler() (factory.Configurator, util.ShutdownFunc) {
    apiURL, apiShutdown := util.StartApiserver()
    clientSet := clientset.NewForConfigOrDie(&restclient.Config{
        Host: apiURL,
10
vendor/k8s.io/kubernetes/test/integration/secrets/BUILD
generated
vendored
@ -14,13 +14,13 @@ go_test(
    ],
    tags = ["integration"],
    deps = [
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//test/integration:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)
35
vendor/k8s.io/kubernetes/test/integration/serviceaccount/BUILD
generated
vendored
@ -14,30 +14,27 @@ go_test(
    ],
    tags = ["integration"],
    deps = [
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/client/informers/informers_generated/internalversion:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/controller/serviceaccount:go_default_library",
        "//pkg/serviceaccount:go_default_library",
        "//pkg/util/metrics:go_default_library",
        "//plugin/pkg/admission/serviceaccount:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/request/bearertoken:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/request/union:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/request/union:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)
29
vendor/k8s.io/kubernetes/test/integration/serviceaccount/service_account_test.go
generated
vendored
@ -21,6 +21,7 @@ package serviceaccount
// to work for any client of the HTTP interface.

import (
    "context"
    "crypto/rand"
    "crypto/rsa"
    "fmt"
@ -43,14 +44,12 @@ import (
    "k8s.io/apiserver/pkg/authentication/user"
    "k8s.io/apiserver/pkg/authorization/authorizer"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
    "k8s.io/kubernetes/pkg/controller"
    serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
    "k8s.io/kubernetes/pkg/serviceaccount"
    "k8s.io/kubernetes/pkg/util/metrics"
    serviceaccountadmission "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
    "k8s.io/kubernetes/test/integration/framework"
)
@ -363,19 +362,19 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
    // Root client
    // TODO: remove rootClient after we refactor pkg/admission to use the clientset.
    rootClientset := clientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}, BearerToken: rootToken})
    internalRootClientset := internalclientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}, BearerToken: rootToken})
    externalRootClientset := kubernetes.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}, BearerToken: rootToken})
    // Set up two authenticators:
    // 1. A token authenticator that maps the rootToken to the "root" user
    // 2. A ServiceAccountToken authenticator that validates ServiceAccount tokens
    rootTokenAuth := authenticator.TokenFunc(func(token string) (user.Info, bool, error) {
    rootTokenAuth := authenticator.TokenFunc(func(ctx context.Context, token string) (*authenticator.Response, bool, error) {
        if token == rootToken {
            return &user.DefaultInfo{Name: rootUserName}, true, nil
            return &authenticator.Response{User: &user.DefaultInfo{Name: rootUserName}}, true, nil
        }
        return nil, false, nil
    })
    serviceAccountKey, _ := rsa.GenerateKey(rand.Reader, 2048)
    serviceAccountTokenGetter := serviceaccountcontroller.NewGetterFromClient(rootClientset)
    serviceAccountTokenAuth := serviceaccount.JWTTokenAuthenticator(serviceaccount.LegacyIssuer, []interface{}{&serviceAccountKey.PublicKey}, serviceaccount.NewLegacyValidator(true, serviceAccountTokenGetter))
    serviceAccountTokenAuth := serviceaccount.JWTTokenAuthenticator(serviceaccount.LegacyIssuer, []interface{}{&serviceAccountKey.PublicKey}, nil, serviceaccount.NewLegacyValidator(true, serviceAccountTokenGetter))
    authenticator := union.New(
        bearertoken.New(rootTokenAuth),
        bearertoken.New(serviceAccountTokenAuth),
@ -418,9 +417,9 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie

    // Set up admission plugin to auto-assign serviceaccounts to pods
    serviceAccountAdmission := serviceaccountadmission.NewServiceAccount()
    serviceAccountAdmission.SetInternalKubeClientSet(internalRootClientset)
    internalInformers := internalinformers.NewSharedInformerFactory(internalRootClientset, controller.NoResyncPeriodFunc())
    serviceAccountAdmission.SetInternalKubeInformerFactory(internalInformers)
    serviceAccountAdmission.SetExternalKubeClientSet(externalRootClientset)
    externalInformers := informers.NewSharedInformerFactory(externalRootClientset, controller.NoResyncPeriodFunc())
    serviceAccountAdmission.SetExternalKubeInformerFactory(externalInformers)
    informers := informers.NewSharedInformerFactory(rootClientset, controller.NoResyncPeriodFunc())

    masterConfig := framework.NewMasterConfig()
@ -437,19 +436,21 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
        apiServer.Close()
    }

    metrics.UnregisterMetricAndUntrackRateLimiterUsage("serviceaccount_tokens_controller")
    tokenGenerator, err := serviceaccount.JWTTokenGenerator(serviceaccount.LegacyIssuer, serviceAccountKey)
    if err != nil {
        return rootClientset, clientConfig, stop, err
    }
    tokenController, err := serviceaccountcontroller.NewTokensController(
        informers.Core().V1().ServiceAccounts(),
        informers.Core().V1().Secrets(),
        rootClientset,
        serviceaccountcontroller.TokensControllerOptions{TokenGenerator: serviceaccount.JWTTokenGenerator(serviceaccount.LegacyIssuer, serviceAccountKey)},
        serviceaccountcontroller.TokensControllerOptions{TokenGenerator: tokenGenerator},
    )
    if err != nil {
        return rootClientset, clientConfig, stop, err
    }
    go tokenController.Run(1, stopCh)

    metrics.UnregisterMetricAndUntrackRateLimiterUsage("serviceaccount_controller")
    serviceAccountController, err := serviceaccountcontroller.NewServiceAccountsController(
        informers.Core().V1().ServiceAccounts(),
        informers.Core().V1().Namespaces(),
@ -460,7 +461,7 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
        return rootClientset, clientConfig, stop, err
    }
    informers.Start(stopCh)
    internalInformers.Start(stopCh)
    externalInformers.Start(stopCh)
    go serviceAccountController.Run(5, stopCh)

    return rootClientset, clientConfig, stop, nil
46
vendor/k8s.io/kubernetes/test/integration/serving/BUILD
generated
vendored
Normal file
@ -0,0 +1,46 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "main_test.go",
        "serving_test.go",
    ],
    tags = [
        "etcd",
        "integration",
    ],
    deps = [
        "//cmd/cloud-controller-manager/app/testing:go_default_library",
        "//cmd/kube-apiserver/app/testing:go_default_library",
        "//cmd/kube-controller-manager/app/testing:go_default_library",
        "//cmd/kube-scheduler/app/testing:go_default_library",
        "//pkg/cloudprovider/providers/fake:go_default_library",
        "//staging/src/k8s.io/api/rbac/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/cloud-provider:go_default_library",
        "//test/integration/framework:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
27
vendor/k8s.io/kubernetes/test/integration/serving/main_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package serving

import (
    "testing"

    "k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
    framework.EtcdMain(m.Run)
}
364
vendor/k8s.io/kubernetes/test/integration/serving/serving_test.go
generated
vendored
Normal file
@ -0,0 +1,364 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package serving

import (
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "os"
    "path"
    "strings"
    "testing"

    rbacv1 "k8s.io/api/rbac/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apiserver/pkg/server"
    "k8s.io/apiserver/pkg/server/options"
    "k8s.io/client-go/kubernetes"
    "k8s.io/cloud-provider"
    cloudctrlmgrtesting "k8s.io/kubernetes/cmd/cloud-controller-manager/app/testing"
    kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
    kubectrlmgrtesting "k8s.io/kubernetes/cmd/kube-controller-manager/app/testing"
    kubeschedulertesting "k8s.io/kubernetes/cmd/kube-scheduler/app/testing"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
    "k8s.io/kubernetes/test/integration/framework"
)

type componentTester interface {
    StartTestServer(t kubectrlmgrtesting.Logger, customFlags []string) (*options.SecureServingOptionsWithLoopback, *server.SecureServingInfo, *server.DeprecatedInsecureServingInfo, func(), error)
}

type kubeControllerManagerTester struct{}

func (kubeControllerManagerTester) StartTestServer(t kubectrlmgrtesting.Logger, customFlags []string) (*options.SecureServingOptionsWithLoopback, *server.SecureServingInfo, *server.DeprecatedInsecureServingInfo, func(), error) {
    gotResult, err := kubectrlmgrtesting.StartTestServer(t, customFlags)
    if err != nil {
        return nil, nil, nil, nil, err
    }
    return gotResult.Options.SecureServing, gotResult.Config.SecureServing, gotResult.Config.InsecureServing, gotResult.TearDownFn, err
}

type cloudControllerManagerTester struct{}

func (cloudControllerManagerTester) StartTestServer(t kubectrlmgrtesting.Logger, customFlags []string) (*options.SecureServingOptionsWithLoopback, *server.SecureServingInfo, *server.DeprecatedInsecureServingInfo, func(), error) {
    gotResult, err := cloudctrlmgrtesting.StartTestServer(t, customFlags)
    if err != nil {
        return nil, nil, nil, nil, err
    }
    return gotResult.Options.SecureServing, gotResult.Config.SecureServing, gotResult.Config.InsecureServing, gotResult.TearDownFn, err
}

type kubeSchedulerTester struct{}

func (kubeSchedulerTester) StartTestServer(t kubectrlmgrtesting.Logger, customFlags []string) (*options.SecureServingOptionsWithLoopback, *server.SecureServingInfo, *server.DeprecatedInsecureServingInfo, func(), error) {
    gotResult, err := kubeschedulertesting.StartTestServer(t, customFlags)
    if err != nil {
        return nil, nil, nil, nil, err
    }
    return gotResult.Options.SecureServing, gotResult.Config.SecureServing, gotResult.Config.InsecureServing, gotResult.TearDownFn, err
}

func TestComponentSecureServingAndAuth(t *testing.T) {
    if !cloudprovider.IsCloudProvider("fake") {
        cloudprovider.RegisterCloudProvider("fake", fakeCloudProviderFactory)
    }

    // Insulate this test from picking up in-cluster config when run inside a pod
    // We can't assume we have permissions to write to /var/run/secrets/... from a unit test to mock in-cluster config for testing
    originalHost := os.Getenv("KUBERNETES_SERVICE_HOST")
    if len(originalHost) > 0 {
        os.Setenv("KUBERNETES_SERVICE_HOST", "")
        defer os.Setenv("KUBERNETES_SERVICE_HOST", originalHost)
    }

    // authenticate to apiserver via bearer token
    token := "flwqkenfjasasdfmwerasd"
    tokenFile, err := ioutil.TempFile("", "kubeconfig")
    if err != nil {
        t.Fatal(err)
    }
    tokenFile.WriteString(fmt.Sprintf(`
%s,controller-manager,controller-manager,""
`, token))
    tokenFile.Close()

    // start apiserver
    server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{
        "--token-auth-file", tokenFile.Name(),
        "--authorization-mode", "RBAC",
    }, framework.SharedEtcd())
    defer server.TearDownFn()

    // allow controller-manager to do SubjectAccessReview
    client, err := kubernetes.NewForConfig(server.ClientConfig)
    if err != nil {
        t.Fatalf("unexpected error creating client config: %v", err)
    }
    _, err = client.RbacV1().ClusterRoleBindings().Create(&rbacv1.ClusterRoleBinding{
        ObjectMeta: metav1.ObjectMeta{Name: "controller-manager:system:auth-delegator"},
        Subjects: []rbacv1.Subject{{
            Kind: "User",
            Name: "controller-manager",
        }},
        RoleRef: rbacv1.RoleRef{
            APIGroup: "rbac.authorization.k8s.io",
            Kind:     "ClusterRole",
            Name:     "system:auth-delegator",
        },
    })
    if err != nil {
        t.Fatalf("failed to create system:auth-delegator rbac cluster role binding: %v", err)
    }

    // allow controller-manager to read kube-system/extension-apiserver-authentication
    _, err = client.RbacV1().RoleBindings("kube-system").Create(&rbacv1.RoleBinding{
        ObjectMeta: metav1.ObjectMeta{Name: "controller-manager:extension-apiserver-authentication-reader"},
        Subjects: []rbacv1.Subject{{
            Kind: "User",
            Name: "controller-manager",
        }},
        RoleRef: rbacv1.RoleRef{
            APIGroup: "rbac.authorization.k8s.io",
            Kind:     "Role",
            Name:     "extension-apiserver-authentication-reader",
        },
    })
    if err != nil {
        t.Fatalf("failed to create controller-manager:extension-apiserver-authentication-reader rbac role binding: %v", err)
    }

    // create kubeconfig for the apiserver
    apiserverConfig, err := ioutil.TempFile("", "kubeconfig")
    if err != nil {
        t.Fatal(err)
    }
    apiserverConfig.WriteString(fmt.Sprintf(`
apiVersion: v1
kind: Config
clusters:
- cluster:
    server: %s
    certificate-authority: %s
  name: integration
contexts:
- context:
    cluster: integration
    user: controller-manager
  name: default-context
current-context: default-context
users:
- name: controller-manager
  user:
    token: %s
`, server.ClientConfig.Host, server.ServerOpts.SecureServing.ServerCert.CertKey.CertFile, token))
    apiserverConfig.Close()

    // create BROKEN kubeconfig for the apiserver
    brokenApiserverConfig, err := ioutil.TempFile("", "kubeconfig")
    if err != nil {
        t.Fatal(err)
    }
    brokenApiserverConfig.WriteString(fmt.Sprintf(`
apiVersion: v1
kind: Config
clusters:
- cluster:
    server: %s
    certificate-authority: %s
  name: integration
contexts:
- context:
    cluster: integration
    user: controller-manager
  name: default-context
current-context: default-context
users:
- name: controller-manager
  user:
    token: WRONGTOKEN
`, server.ClientConfig.Host, server.ServerOpts.SecureServing.ServerCert.CertKey.CertFile))
    brokenApiserverConfig.Close()

    tests := []struct {
        name       string
        tester     componentTester
        extraFlags []string
    }{
        {"kube-controller-manager", kubeControllerManagerTester{}, nil},
        {"cloud-controller-manager", cloudControllerManagerTester{}, []string{"--cloud-provider=fake"}},
        {"kube-scheduler", kubeSchedulerTester{}, nil},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            testComponent(t, tt.tester, apiserverConfig.Name(), brokenApiserverConfig.Name(), token, tt.extraFlags)
        })
    }
}

func testComponent(t *testing.T, tester componentTester, kubeconfig, brokenKubeconfig, token string, extraFlags []string) {
    tests := []struct {
        name                             string
        flags                            []string
        path                             string
        anonymous                        bool // to use the token or not
        wantErr                          bool
        wantSecureCode, wantInsecureCode *int
    }{
        {"no-flags", nil, "/healthz", false, true, nil, nil},
        {"insecurely /healthz", []string{
            "--secure-port=0",
            "--port=10253",
            "--kubeconfig", kubeconfig,
            "--leader-elect=false",
        }, "/healthz", true, false, nil, intPtr(http.StatusOK)},
        {"insecurely /metrics", []string{
            "--secure-port=0",
            "--port=10253",
            "--kubeconfig", kubeconfig,
            "--leader-elect=false",
        }, "/metrics", true, false, nil, intPtr(http.StatusOK)},
        {"/healthz without authn/authz", []string{
            "--port=0",
            "--kubeconfig", kubeconfig,
            "--leader-elect=false",
        }, "/healthz", true, false, intPtr(http.StatusOK), nil},
        {"/metrics without authn/authz", []string{
            "--kubeconfig", kubeconfig,
            "--leader-elect=false",
            "--port=10253",
        }, "/metrics", true, false, intPtr(http.StatusForbidden), intPtr(http.StatusOK)},
        {"authorization skipped for /healthz with authn/authz", []string{
            "--port=0",
            "--authentication-kubeconfig", kubeconfig,
            "--authorization-kubeconfig", kubeconfig,
            "--kubeconfig", kubeconfig,
            "--leader-elect=false",
        }, "/healthz", false, false, intPtr(http.StatusOK), nil},
        {"authorization skipped for /healthz with BROKEN authn/authz", []string{
            "--port=0",
            "--authentication-skip-lookup", // to survive unaccessible extensions-apiserver-authentication configmap
            "--authentication-kubeconfig", brokenKubeconfig,
            "--authorization-kubeconfig", brokenKubeconfig,
            "--kubeconfig", kubeconfig,
            "--leader-elect=false",
        }, "/healthz", false, false, intPtr(http.StatusOK), nil},
        {"not authorized /metrics", []string{
            "--port=0",
            "--authentication-kubeconfig", kubeconfig,
            "--authorization-kubeconfig", kubeconfig,
            "--kubeconfig", kubeconfig,
            "--leader-elect=false",
        }, "/metrics", false, false, intPtr(http.StatusForbidden), nil},
        {"not authorized /metrics with BROKEN authn/authz", []string{
            "--port=10253",
            "--authentication-kubeconfig", kubeconfig,
            "--authorization-kubeconfig", brokenKubeconfig,
            "--kubeconfig", kubeconfig,
            "--leader-elect=false",
        }, "/metrics", false, false, intPtr(http.StatusInternalServerError), intPtr(http.StatusOK)},
        {"always-allowed /metrics with BROKEN authn/authz", []string{
            "--port=0",
            "--authentication-skip-lookup", // to survive unaccessible extensions-apiserver-authentication configmap
            "--authentication-kubeconfig", kubeconfig,
            "--authorization-kubeconfig", kubeconfig,
            "--authorization-always-allow-paths", "/healthz,/metrics",
            "--kubeconfig", kubeconfig,
            "--leader-elect=false",
        }, "/metrics", false, false, intPtr(http.StatusOK), nil},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            secureOptions, secureInfo, insecureInfo, tearDownFn, err := tester.StartTestServer(t, append(append([]string{}, tt.flags...), extraFlags...))
            if tearDownFn != nil {
                defer tearDownFn()
            }
            if (err != nil) != tt.wantErr {
                t.Fatalf("StartTestServer() error = %v, wantErr %v", err, tt.wantErr)
            }
            if err != nil {
                return
            }

            if want, got := tt.wantSecureCode != nil, secureInfo != nil; want != got {
                t.Errorf("SecureServing enabled: expected=%v got=%v", want, got)
            } else if want {
                url := fmt.Sprintf("https://%s%s", secureInfo.Listener.Addr().String(), tt.path)
                url = strings.Replace(url, "[::]", "127.0.0.1", -1) // switch to IPv4 because the self-signed cert does not support [::]

                // read self-signed server cert disk
                pool := x509.NewCertPool()
                serverCertPath := path.Join(secureOptions.ServerCert.CertDirectory, secureOptions.ServerCert.PairName+".crt")
                serverCert, err := ioutil.ReadFile(serverCertPath)
                if err != nil {
                    t.Fatalf("Failed to read component server cert %q: %v", serverCertPath, err)
                }
                pool.AppendCertsFromPEM(serverCert)
                tr := &http.Transport{
                    TLSClientConfig: &tls.Config{
                        RootCAs: pool,
                    },
                }

                client := &http.Client{Transport: tr}
                req, err := http.NewRequest("GET", url, nil)
                if err != nil {
                    t.Fatal(err)
                }
                if !tt.anonymous {
                    req.Header.Add("Authorization", fmt.Sprintf("Token %s", token))
                }
                r, err := client.Do(req)
                if err != nil {
                    t.Fatalf("failed to GET %s from component: %v", tt.path, err)
                }

                body, err := ioutil.ReadAll(r.Body)
                defer r.Body.Close()
                if got, expected := r.StatusCode, *tt.wantSecureCode; got != expected {
                    t.Fatalf("expected http %d at %s of component, got: %d %q", expected, tt.path, got, string(body))
                }
            }

            if want, got := tt.wantInsecureCode != nil, insecureInfo != nil; want != got {
                t.Errorf("InsecureServing enabled: expected=%v got=%v", want, got)
            } else if want {
                url := fmt.Sprintf("http://%s%s", insecureInfo.Listener.Addr().String(), tt.path)
                r, err := http.Get(url)
                if err != nil {
                    t.Fatalf("failed to GET %s from component: %v", tt.path, err)
                }
                body, err := ioutil.ReadAll(r.Body)
                defer r.Body.Close()
                if got, expected := r.StatusCode, *tt.wantInsecureCode; got != expected {
                    t.Fatalf("expected http %d at %s of component, got: %d %q", expected, tt.path, got, string(body))
                }
            }
        })
    }
}

func intPtr(x int) *int {
    return &x
}

func fakeCloudProviderFactory(io.Reader) (cloudprovider.Interface, error) {
    return &fake.FakeCloud{}, nil
}
32
vendor/k8s.io/kubernetes/test/integration/statefulset/BUILD
generated
vendored
@ -8,19 +8,19 @@ go_library(
    deps = [
        #"//pkg/api:go_default_library",
        "//pkg/controller/statefulset:go_default_library",
        "//staging/src/k8s.io/api/apps/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/util/retry:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/util/retry:go_default_library",
    ],
)

@ -34,11 +34,11 @@ go_test(
    embed = [":go_default_library"],
    tags = ["integration"],
    deps = [
        "//staging/src/k8s.io/api/apps/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
    ],
)
2
vendor/k8s.io/kubernetes/test/integration/statefulset/util.go
generated
vendored
@ -79,7 +79,7 @@ func newHeadlessService(namespace string) *v1.Service {
    }
}

// newSTS returns a StatefulSet with with a fake container image
// newSTS returns a StatefulSet with a fake container image
func newSTS(name, namespace string, replicas int) *v1beta1.StatefulSet {
    replicasCopy := int32(replicas)
    return &v1beta1.StatefulSet{
14
vendor/k8s.io/kubernetes/test/integration/storageclasses/BUILD
generated
vendored
@ -14,14 +14,14 @@ go_test(
    ],
    tags = ["integration"],
    deps = [
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/storage/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)
Some files were not shown because too many files have changed in this diff