Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 18:43:34 +00:00
vendor updates
vendor/k8s.io/kubernetes/test/e2e/BUILD (generated, vendored): 7 changed lines
@@ -9,8 +9,7 @@ load(
 go_test(
     name = "go_default_test",
     srcs = ["e2e_test.go"],
-    importpath = "k8s.io/kubernetes/test/e2e",
-    library = ":go_default_library",
+    embed = [":go_default_library"],
     tags = ["e2e"],
     deps = [
         "//test/e2e/apimachinery:go_default_library",
@@ -23,7 +22,6 @@ go_test(
         "//test/e2e/kubectl:go_default_library",
         "//test/e2e/lifecycle:go_default_library",
         "//test/e2e/lifecycle/bootstrap:go_default_library",
-        "//test/e2e/multicluster:go_default_library",
         "//test/e2e/network:go_default_library",
         "//test/e2e/node:go_default_library",
         "//test/e2e/scalability:go_default_library",
@@ -48,7 +46,6 @@ go_library(
         "//pkg/api/v1/pod:go_default_library",
         "//pkg/cloudprovider/providers/azure:go_default_library",
         "//pkg/cloudprovider/providers/gce:go_default_library",
-        "//pkg/kubectl/util/logs:go_default_library",
         "//pkg/version:go_default_library",
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
@@ -71,6 +68,7 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
+        "//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
     ],
 )
@@ -109,7 +107,6 @@ filegroup(
         "//test/e2e/kubectl:all-srcs",
         "//test/e2e/lifecycle:all-srcs",
         "//test/e2e/manifest:all-srcs",
-        "//test/e2e/multicluster:all-srcs",
         "//test/e2e/network:all-srcs",
         "//test/e2e/node:all-srcs",
         "//test/e2e/perftype:all-srcs",
vendor/k8s.io/kubernetes/test/e2e/apimachinery/BUILD (generated, vendored): 3 changed lines
@@ -38,6 +38,7 @@ go_library(
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
         "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
         "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library",
+        "//vendor/k8s.io/api/authorization/v1:go_default_library",
         "//vendor/k8s.io/api/batch/v1:go_default_library",
         "//vendor/k8s.io/api/batch/v1beta1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
@@ -49,7 +50,7 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
vendor/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go (generated, vendored): 40 changed lines
@@ -41,6 +41,7 @@ import (
     rbacapi "k8s.io/kubernetes/pkg/apis/rbac"
     utilversion "k8s.io/kubernetes/pkg/util/version"
     "k8s.io/kubernetes/test/e2e/framework"
+    imageutils "k8s.io/kubernetes/test/utils/image"
     samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
 
     . "github.com/onsi/ginkgo"
@@ -52,14 +53,20 @@ var _ = SIGDescribe("Aggregator", func() {
     var ns string
     var c clientset.Interface
     var aggrclient *aggregatorclient.Clientset
-    f := framework.NewDefaultFramework("aggregator")
-    framework.AddCleanupAction(func() {
-        // Cleanup actions will be called even when the tests are skipped and leaves namespace unset.
-        if len(ns) > 0 {
-            cleanTest(c, aggrclient, ns)
-        }
+
+    // BeforeEachs run in LIFO order, AfterEachs run in FIFO order.
+    // We want cleanTest to happen before the namespace cleanup AfterEach
+    // inserted by NewDefaultFramework, so we put this AfterEach in front
+    // of NewDefaultFramework.
+    AfterEach(func() {
+        cleanTest(c, aggrclient, ns)
     })
+
+    f := framework.NewDefaultFramework("aggregator")
+
+    // We want namespace initialization BeforeEach inserted by
+    // NewDefaultFramework to happen before this, so we put this BeforeEach
+    // after NewDefaultFramework.
     BeforeEach(func() {
         c = f.ClientSet
         ns = f.Namespace.Name
@@ -72,7 +79,7 @@ var _ = SIGDescribe("Aggregator", func() {
         framework.SkipUnlessProviderIs("gce", "gke")
 
         // Testing a 1.7 version of the sample-apiserver
-        TestSampleAPIServer(f, "gcr.io/kubernetes-e2e-test-images/k8s-aggregator-sample-apiserver-amd64:1.7v2")
+        TestSampleAPIServer(f, imageutils.GetE2EImage(imageutils.APIServer))
     })
 })
 
@@ -126,7 +133,7 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
 
     // kubectl create -f deploy.yaml
     deploymentName := "sample-apiserver-deployment"
-    etcdImage := "quay.io/coreos/etcd:v3.1.10"
+    etcdImage := "quay.io/coreos/etcd:v3.2.14"
     podLabels := map[string]string{"app": "sample-apiserver", "apiserver": "true"}
     replicas := int32(1)
     zero := int64(0)
@@ -284,6 +291,13 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
     })
     framework.ExpectNoError(err, "creating role binding %s:sample-apiserver to access configMap", namespace)
 
+    // Wait for the extension apiserver to be up and healthy
+    // kubectl get deployments -n <aggregated-api-namespace> && status == Running
+    // NOTE: aggregated apis should generally be set up in there own namespace (<aggregated-api-namespace>). As the test framework
+    // is setting up a new namespace, we are just using that.
+    err = framework.WaitForDeploymentComplete(client, deployment)
+    framework.ExpectNoError(err, "deploying extension apiserver in namespace %s", namespace)
+
     // kubectl create -f apiservice.yaml
     _, err = aggrclient.ApiregistrationV1beta1().APIServices().Create(&apiregistrationv1beta1.APIService{
         ObjectMeta: metav1.ObjectMeta{Name: "v1alpha1.wardle.k8s.io"},
@@ -301,13 +315,6 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
     })
     framework.ExpectNoError(err, "creating apiservice %s with namespace %s", "v1alpha1.wardle.k8s.io", namespace)
 
-    // Wait for the extension apiserver to be up and healthy
-    // kubectl get deployments -n <aggregated-api-namespace> && status == Running
-    // NOTE: aggregated apis should generally be set up in there own namespace (<aggregated-api-namespace>). As the test framework
-    // is setting up a new namespace, we are just using that.
-    err = framework.WaitForDeploymentComplete(client, deployment)
-
     // We seem to need to do additional waiting until the extension api service is actually up.
     err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
         request := restClient.Get().AbsPath("/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders")
         request.SetHeader("Accept", "application/json")
@@ -317,6 +324,9 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
         if !ok {
             return false, err
         }
+        if status.Status().Code == 503 {
+            return false, nil
+        }
         if status.Status().Code == 404 && strings.HasPrefix(err.Error(), "the server could not find the requested resource") {
             return false, nil
         }
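The aggregator.go reordering above works because, as the new comments state, the custom AfterEach must be registered before framework.NewDefaultFramework inserts its own namespace-cleanup AfterEach, and the custom BeforeEach after it. A minimal sketch of that registration pattern, assuming only the framework and Ginkgo packages already imported in this diff (the test body is hypothetical, not part of the commit):

package e2esketch

import (
    . "github.com/onsi/ginkgo"

    "k8s.io/kubernetes/test/e2e/framework"
)

var _ = Describe("cleanup ordering sketch", func() {
    var ns string

    // Registered before NewDefaultFramework, so per the AfterEach ordering
    // the diff's comments rely on, it runs before the framework deletes the
    // test namespace.
    AfterEach(func() {
        if ns != "" {
            // tear down fixtures while the namespace still exists
        }
    })

    f := framework.NewDefaultFramework("ordering-sketch")

    // Registered after NewDefaultFramework, so the framework's namespace
    // BeforeEach has already populated f.Namespace when this runs.
    BeforeEach(func() {
        ns = f.Namespace.Name
    })

    It("sees a live namespace and cleans up in order", func() {})
})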
vendor/k8s.io/kubernetes/test/e2e/apimachinery/garbage_collector.go (generated, vendored): 71 changed lines
@@ -83,13 +83,8 @@ func getBackgroundOptions() *metav1.DeleteOptions {
 }
 
 func getOrphanOptions() *metav1.DeleteOptions {
-    var trueVar = true
-    return &metav1.DeleteOptions{OrphanDependents: &trueVar}
-}
-
-func getNonOrphanOptions() *metav1.DeleteOptions {
-    var falseVar = false
-    return &metav1.DeleteOptions{OrphanDependents: &falseVar}
+    policy := metav1.DeletePropagationOrphan
+    return &metav1.DeleteOptions{PropagationPolicy: &policy}
 }
 
 var (
@@ -255,7 +250,7 @@ func verifyRemainingCronJobsJobsPods(f *framework.Framework, clientSet clientset
         By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", cjNum, len(cronJobs.Items)))
     }
 
-    jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
+    jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
     if err != nil {
         return false, fmt.Errorf("Failed to list jobs: %v", err)
     }
@@ -330,7 +325,14 @@ func newCronJob(name, schedule string) *batchv1beta1.CronJob {
 
 var _ = SIGDescribe("Garbage collector", func() {
     f := framework.NewDefaultFramework("gc")
-    It("should delete pods created by rc when not orphaning", func() {
+
+    /*
+        Testname: garbage-collector-delete-rc--propagation-background
+        Description: Ensure that if deleteOptions.PropagationPolicy is set to Background,
+        then deleting a ReplicationController should cause pods created
+        by that RC to also be deleted.
+    */
+    framework.ConformanceIt("should delete pods created by rc when not orphaning", func() {
         clientSet := f.ClientSet
         rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
         podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
@@ -362,7 +364,7 @@ var _ = SIGDescribe("Garbage collector", func() {
             framework.Failf("failed to wait for the rc to create some pods: %v", err)
         }
         By("delete the rc")
-        deleteOptions := getNonOrphanOptions()
+        deleteOptions := getBackgroundOptions()
         deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
         if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
             framework.Failf("failed to delete the rc: %v", err)
@@ -383,7 +385,13 @@ var _ = SIGDescribe("Garbage collector", func() {
         gatherMetrics(f)
     })
 
-    It("should orphan pods created by rc if delete options say so", func() {
+    /*
+        Testname: garbage-collector-delete-rc--propagation-orphan
+        Description: Ensure that if deleteOptions.PropagationPolicy is set to Orphan,
+        then deleting a ReplicationController should cause pods created
+        by that RC to be orphaned.
+    */
+    framework.ConformanceIt("should orphan pods created by rc if delete options say so", func() {
         clientSet := f.ClientSet
         rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
         podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
@@ -501,7 +509,13 @@ var _ = SIGDescribe("Garbage collector", func() {
         gatherMetrics(f)
     })
 
-    It("should delete RS created by deployment when not orphaning", func() {
+    /*
+        Testname: garbage-collector-delete-deployment-propagation-background
+        Description: Ensure that if deleteOptions.PropagationPolicy is set to Background,
+        then deleting a Deployment should cause ReplicaSets created
+        by that Deployment to also be deleted.
+    */
+    framework.ConformanceIt("should delete RS created by deployment when not orphaning", func() {
         clientSet := f.ClientSet
         deployClient := clientSet.ExtensionsV1beta1().Deployments(f.Namespace.Name)
         rsClient := clientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name)
@@ -529,7 +543,7 @@ var _ = SIGDescribe("Garbage collector", func() {
         }
 
         By("delete the deployment")
-        deleteOptions := getNonOrphanOptions()
+        deleteOptions := getBackgroundOptions()
         deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID))
         if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil {
             framework.Failf("failed to delete the deployment: %v", err)
@@ -552,7 +566,13 @@ var _ = SIGDescribe("Garbage collector", func() {
         gatherMetrics(f)
     })
 
-    It("should orphan RS created by deployment when deleteOptions.OrphanDependents is true", func() {
+    /*
+        Testname: garbage-collector-delete-deployment-propagation-true
+        Description: Ensure that if deleteOptions.PropagationPolicy is set to Orphan,
+        then deleting a Deployment should cause ReplicaSets created
+        by that Deployment to be orphaned.
+    */
+    framework.ConformanceIt("should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan", func() {
         clientSet := f.ClientSet
         deployClient := clientSet.ExtensionsV1beta1().Deployments(f.Namespace.Name)
         rsClient := clientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name)
@@ -617,7 +637,12 @@ var _ = SIGDescribe("Garbage collector", func() {
         gatherMetrics(f)
     })
 
-    It("should keep the rc around until all its pods are deleted if the deleteOptions says so", func() {
+    /*
+        Testname: garbage-collector-delete-rc-after-owned-pods
+        Description: Ensure that if deleteOptions.PropagationPolicy is set to Foreground,
+        then a ReplicationController should not be deleted until all its dependent pods are deleted.
+    */
+    framework.ConformanceIt("should keep the rc around until all its pods are deleted if the deleteOptions says so", func() {
         clientSet := f.ClientSet
         rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
         podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
@@ -701,7 +726,12 @@ var _ = SIGDescribe("Garbage collector", func() {
     })
 
     // TODO: this should be an integration test
-    It("should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted", func() {
+    /*
+        Testname: garbage-collector-multiple-owners
+        Description: Ensure that if a Pod has multiple valid owners, it will not be deleted
+        when one of of those owners gets deleted.
+    */
+    framework.ConformanceIt("should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted", func() {
         clientSet := f.ClientSet
         rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
         podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
@@ -812,7 +842,12 @@ var _ = SIGDescribe("Garbage collector", func() {
     })
 
     // TODO: should be an integration test
-    It("should not be blocked by dependency circle", func() {
+    /*
+        Testname: garbage-collector-dependency-cycle
+        Description: Ensure that a dependency cycle will
+        not block the garbage collector.
+    */
+    framework.ConformanceIt("should not be blocked by dependency circle", func() {
         clientSet := f.ClientSet
         podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
         pod1 := newGCPod("pod1")
@@ -974,7 +1009,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 
         By("Wait for the CronJob to create new Job")
         err = wait.PollImmediate(500*time.Millisecond, 2*time.Minute, func() (bool, error) {
-            jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
+            jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
             if err != nil {
                 return false, fmt.Errorf("Failed to list jobs: %v", err)
             }
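The garbage_collector.go changes above replace the deprecated DeleteOptions.OrphanDependents boolean with the PropagationPolicy field. A minimal sketch of the new-style options, using only the metav1 API visible in the diff (the helper name is hypothetical):

package gcsketch

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// deleteOptionsFor builds DeleteOptions around the PropagationPolicy field
// that this diff migrates to. The e2e file defines one helper per policy
// (getBackgroundOptions, getOrphanOptions, ...); this generalizes them.
func deleteOptionsFor(policy metav1.DeletionPropagation) *metav1.DeleteOptions {
    return &metav1.DeleteOptions{PropagationPolicy: &policy}
}

// Usage:
//   deleteOptionsFor(metav1.DeletePropagationBackground) // delete dependents asynchronously
//   deleteOptionsFor(metav1.DeletePropagationOrphan)     // leave dependents behind
//   deleteOptionsFor(metav1.DeletePropagationForeground) // owner waits for dependents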
vendor/k8s.io/kubernetes/test/e2e/apimachinery/initializers.go (generated, vendored): 4 changed lines
@@ -315,7 +315,7 @@ func newReplicaset() *v1beta1.ReplicaSet {
                 Containers: []v1.Container{
                     {
                         Name:  name + "-container",
-                        Image: "gcr.io/google_containers/porter:4524579c0eb935c056c8e75563b4e1eda31587e0",
+                        Image: imageutils.GetE2EImage(imageutils.Porter),
                     },
                 },
             },
@@ -405,7 +405,7 @@ func cleanupInitializer(c clientset.Interface, initializerConfigName, initialize
 // waits till the RS status.observedGeneration matches metadata.generation.
 func waitForRSObservedGeneration(c clientset.Interface, ns, name string, generation int64) error {
     return wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
-        rs, err := c.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
+        rs, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
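The initializers.go change above is part of a pattern repeated throughout this commit: version-implicit client accessors (c.Extensions(), f.ClientSet.Batch()) become explicitly versioned ones (c.ExtensionsV1beta1(), BatchV1()), so the requested API group and version are visible at the call site. A minimal sketch of the new call shape, assuming the era-appropriate client-go Get signature used in this vendored tree (the function name is hypothetical):

package clientsketch

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"
)

// getReplicaSetObservedGeneration fetches a ReplicaSet through the explicitly
// versioned extensions/v1beta1 accessor, mirroring waitForRSObservedGeneration
// in the diff above.
func getReplicaSetObservedGeneration(c clientset.Interface, ns, name string) (int64, error) {
    rs, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
    if err != nil {
        return 0, err
    }
    return rs.Status.ObservedGeneration, nil
}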
vendor/k8s.io/kubernetes/test/e2e/apimachinery/table_conversion.go (generated, vendored): 32 changed lines
@@ -24,11 +24,13 @@ import (
     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
 
+    authorizationv1 "k8s.io/api/authorization/v1"
     "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    metav1alpha1 "k8s.io/apimachinery/pkg/apis/meta/v1alpha1"
+    metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
     "k8s.io/client-go/util/workqueue"
 
     "k8s.io/kubernetes/pkg/printers"
     "k8s.io/kubernetes/test/e2e/framework"
     imageutils "k8s.io/kubernetes/test/utils/image"
@@ -47,8 +49,8 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
         _, err := c.CoreV1().Pods(ns).Create(newTablePod(podName))
         Expect(err).NotTo(HaveOccurred())
 
-        table := &metav1alpha1.Table{}
-        err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").Do().Into(table)
+        table := &metav1beta1.Table{}
+        err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
         Expect(err).NotTo(HaveOccurred())
         framework.Logf("Table: %#v", table)
 
@@ -92,10 +94,10 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
             Fail("Unable to create template %d, exiting", i)
         })
 
-        pagedTable := &metav1alpha1.Table{}
+        pagedTable := &metav1beta1.Table{}
         err := c.CoreV1().RESTClient().Get().Namespace(ns).Resource("podtemplates").
             VersionedParams(&metav1.ListOptions{Limit: 2}, metav1.ParameterCodec).
-            SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").
+            SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
             Do().Into(pagedTable)
         Expect(err).NotTo(HaveOccurred())
         // TODO: kops PR job is still using etcd2, which prevents this feature from working. Remove this check when kops is upgraded to etcd3
@@ -111,7 +113,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
 
         err = c.CoreV1().RESTClient().Get().Namespace(ns).Resource("podtemplates").
             VersionedParams(&metav1.ListOptions{Continue: pagedTable.Continue}, metav1.ParameterCodec).
-            SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").
+            SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
             Do().Into(pagedTable)
         Expect(err).NotTo(HaveOccurred())
         Expect(len(pagedTable.Rows)).To(BeNumerically(">", 0))
@@ -121,8 +123,8 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
     It("should return generic metadata details across all namespaces for nodes", func() {
         c := f.ClientSet
 
-        table := &metav1alpha1.Table{}
-        err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").Do().Into(table)
+        table := &metav1beta1.Table{}
+        err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
         Expect(err).NotTo(HaveOccurred())
         framework.Logf("Table: %#v", table)
 
@@ -141,14 +143,22 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
     It("should return a 406 for a backend which does not implement metadata", func() {
         c := f.ClientSet
 
-        table := &metav1alpha1.Table{}
-        err := c.CoreV1().RESTClient().Get().Resource("services").SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").Do().Into(table)
+        table := &metav1beta1.Table{}
+        sar := &authorizationv1.SelfSubjectAccessReview{
+            Spec: authorizationv1.SelfSubjectAccessReviewSpec{
+                NonResourceAttributes: &authorizationv1.NonResourceAttributes{
+                    Path: "/",
+                    Verb: "get",
+                },
+            },
+        }
+        err := c.AuthorizationV1().RESTClient().Post().Resource("selfsubjectaccessreviews").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Body(sar).Do().Into(table)
         Expect(err).To(HaveOccurred())
         Expect(err.(errors.APIStatus).Status().Code).To(Equal(int32(406)))
     })
 })
 
-func printTable(table *metav1alpha1.Table) string {
+func printTable(table *metav1beta1.Table) string {
     buf := &bytes.Buffer{}
     tw := tabwriter.NewWriter(buf, 5, 8, 1, ' ', 0)
     err := printers.PrintTable(table, tw, printers.PrintOptions{})
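The table_conversion.go changes above bump the server-side Table rendering from meta.k8s.io/v1alpha1 to meta.k8s.io/v1beta1, which shows up both in the Go types and in the Accept header's v= and g= parameters. A minimal sketch of the request pattern, assuming the same clientset and the era-appropriate Do()/Into() signatures used in this vendored tree (the function name is hypothetical):

package tablesketch

import (
    metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
    clientset "k8s.io/client-go/kubernetes"
)

// fetchPodsTable asks the apiserver to render pods as a generic Table by
// content negotiation: the Accept header names the Table kind plus the
// meta.k8s.io group (g=) and v1beta1 version (v=) this diff upgrades to.
func fetchPodsTable(c clientset.Interface, ns string) (*metav1beta1.Table, error) {
    table := &metav1beta1.Table{}
    err := c.CoreV1().RESTClient().Get().
        Resource("pods").
        Namespace(ns).
        SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
        Do().Into(table)
    return table, err
}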
vendor/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go (generated, vendored): 314 changed lines
@@ -27,8 +27,6 @@ import (
     extensions "k8s.io/api/extensions/v1beta1"
     rbacv1beta1 "k8s.io/api/rbac/v1beta1"
     apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
-    crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
-    "k8s.io/apiextensions-apiserver/test/integration/testserver"
     "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -39,6 +37,7 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     utilversion "k8s.io/kubernetes/pkg/util/version"
     "k8s.io/kubernetes/test/e2e/framework"
+    imageutils "k8s.io/kubernetes/test/utils/image"
 
     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
@@ -46,28 +45,29 @@ import (
 )
 
 const (
-    secretName                   = "sample-webhook-secret"
-    deploymentName               = "sample-webhook-deployment"
-    serviceName                  = "e2e-test-webhook"
-    roleBindingName              = "webhook-auth-reader"
-    webhookConfigName            = "e2e-test-webhook-config"
-    mutatingWebhookConfigName    = "e2e-test-mutating-webhook-config"
-    skipNamespaceLabelKey        = "skip-webhook-admission"
-    skipNamespaceLabelValue      = "yes"
-    skippedNamespaceName         = "exempted-namesapce"
-    disallowedPodName            = "disallowed-pod"
-    disallowedConfigMapName      = "disallowed-configmap"
-    allowedConfigMapName         = "allowed-configmap"
-    crdName                      = "e2e-test-webhook-crd"
-    crdKind                      = "E2e-test-webhook-crd"
-    crdWebhookConfigName         = "e2e-test-webhook-config-crd"
-    crdMutatingWebhookConfigName = "e2e-test-mutating-webhook-config-crd"
-    crdAPIGroup                  = "webhook-crd-test.k8s.io"
-    crdAPIVersion                = "v1"
-    webhookFailClosedConfigName  = "e2e-test-webhook-fail-closed"
-    failNamespaceLabelKey        = "fail-closed-webhook"
-    failNamespaceLabelValue      = "yes"
-    failNamespaceName            = "fail-closed-namesapce"
+    secretName      = "sample-webhook-secret"
+    deploymentName  = "sample-webhook-deployment"
+    serviceName     = "e2e-test-webhook"
+    roleBindingName = "webhook-auth-reader"
+
+    // The webhook configuration names should not be reused between test instances.
+    crdWebhookConfigName         = "e2e-test-webhook-config-crd"
+    webhookConfigName            = "e2e-test-webhook-config"
+    mutatingWebhookConfigName    = "e2e-test-mutating-webhook-config"
+    podMutatingWebhookConfigName = "e2e-test-mutating-webhook-pod"
+    crdMutatingWebhookConfigName = "e2e-test-mutating-webhook-config-crd"
+    webhookFailClosedConfigName  = "e2e-test-webhook-fail-closed"
+
+    skipNamespaceLabelKey   = "skip-webhook-admission"
+    skipNamespaceLabelValue = "yes"
+    skippedNamespaceName    = "exempted-namesapce"
+    disallowedPodName       = "disallowed-pod"
+    hangingPodName          = "hanging-pod"
+    disallowedConfigMapName = "disallowed-configmap"
+    allowedConfigMapName    = "allowed-configmap"
+    failNamespaceLabelKey   = "fail-closed-webhook"
+    failNamespaceLabelValue = "yes"
+    failNamespaceName       = "fail-closed-namesapce"
 )
 
 var serverWebhookVersion = utilversion.MustParseSemantic("v1.8.0")
@@ -99,44 +99,57 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
         // Note that in 1.9 we will have backwards incompatible change to
         // admission webhooks, so the image will be updated to 1.9 sometime in
         // the development 1.9 cycle.
-        deployWebhookAndService(f, "gcr.io/kubernetes-e2e-test-images/k8s-sample-admission-webhook-amd64:1.8v6", context)
+        deployWebhookAndService(f, imageutils.GetE2EImage(imageutils.AdmissionWebhook), context)
     })
 
     AfterEach(func() {
         cleanWebhookTest(client, namespaceName)
     })
 
     It("Should be able to deny pod and configmap creation", func() {
-        registerWebhook(f, context)
-        defer client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(webhookConfigName, nil)
+        webhookCleanup := registerWebhook(f, context)
+        defer webhookCleanup()
         testWebhook(f)
     })
 
     It("Should be able to deny custom resource creation", func() {
-        crdCleanup, dynamicClient := createCRD(f)
-        defer crdCleanup()
-        registerWebhookForCRD(f, context)
-        defer client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(crdWebhookConfigName, nil)
-        testCRDWebhook(f, dynamicClient)
+        testcrd, err := framework.CreateTestCRD(f)
+        if err != nil {
+            return
+        }
+        defer testcrd.CleanUp()
+        webhookCleanup := registerWebhookForCRD(f, context, testcrd)
+        defer webhookCleanup()
+        testCRDWebhook(f, testcrd.Crd, testcrd.DynamicClient)
     })
 
     It("Should unconditionally reject operations on fail closed webhook", func() {
-        registerFailClosedWebhook(f, context)
-        defer f.ClientSet.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(webhookFailClosedConfigName, nil)
+        webhookCleanup := registerFailClosedWebhook(f, context)
+        defer webhookCleanup()
        testFailClosedWebhook(f)
     })
 
     It("Should mutate configmap", func() {
-        registerMutatingWebhookForConfigMap(f, context)
-        defer client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(mutatingWebhookConfigName, nil)
+        webhookCleanup := registerMutatingWebhookForConfigMap(f, context)
+        defer webhookCleanup()
         testMutatingConfigMapWebhook(f)
     })
 
+    It("Should mutate pod and apply defaults after mutation", func() {
+        webhookCleanup := registerMutatingWebhookForPod(f, context)
+        defer webhookCleanup()
+        testMutatingPodWebhook(f)
+    })
+
     It("Should mutate crd", func() {
-        crdCleanup, dynamicClient := createCRD(f)
-        defer crdCleanup()
-        registerMutatingWebhookForCRD(f, context)
-        defer client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(crdMutatingWebhookConfigName, nil)
-        testMutatingCRDWebhook(f, dynamicClient)
+        testcrd, err := framework.CreateTestCRD(f)
+        if err != nil {
+            return
+        }
+        defer testcrd.CleanUp()
+        webhookCleanup := registerMutatingWebhookForCRD(f, context, testcrd)
+        defer webhookCleanup()
+        testMutatingCRDWebhook(f, testcrd.Crd, testcrd.DynamicClient)
     })
 
     // TODO: add more e2e tests for mutating webhooks
@@ -291,11 +304,12 @@ func deployWebhookAndService(f *framework.Framework, image string, context *cert
 
 func strPtr(s string) *string { return &s }
 
-func registerWebhook(f *framework.Framework, context *certContext) {
+func registerWebhook(f *framework.Framework, context *certContext) func() {
     client := f.ClientSet
     By("Registering the webhook via the AdmissionRegistration API")
 
     namespace := f.Namespace.Name
+    configName := webhookConfigName
     // A webhook that cannot talk to server, with fail-open policy
     failOpenHook := failingWebhook(namespace, "fail-open.k8s.io")
     policyIgnore := v1beta1.Ignore
@@ -303,7 +317,7 @@ func registerWebhook(f *framework.Framework, context *certContext) {
 
     _, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
         ObjectMeta: metav1.ObjectMeta{
-            Name: webhookConfigName,
+            Name: configName,
         },
         Webhooks: []v1beta1.Webhook{
             {
@@ -359,21 +373,26 @@ func registerWebhook(f *framework.Framework, context *certContext) {
             failOpenHook,
         },
     })
-    framework.ExpectNoError(err, "registering webhook config %s with namespace %s", webhookConfigName, namespace)
+    framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
 
     // The webhook configuration is honored in 1s.
     time.Sleep(10 * time.Second)
+
+    return func() {
+        client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(configName, nil)
+    }
 }
 
-func registerMutatingWebhookForConfigMap(f *framework.Framework, context *certContext) {
+func registerMutatingWebhookForConfigMap(f *framework.Framework, context *certContext) func() {
     client := f.ClientSet
     By("Registering the mutating configmap webhook via the AdmissionRegistration API")
 
     namespace := f.Namespace.Name
+    configName := mutatingWebhookConfigName
 
     _, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
         ObjectMeta: metav1.ObjectMeta{
-            Name: mutatingWebhookConfigName,
+            Name: configName,
         },
         Webhooks: []v1beta1.Webhook{
             {
@@ -416,11 +435,13 @@ func registerMutatingWebhookForConfigMap(f *framework.Framework, context *certCo
             },
         },
     })
-    framework.ExpectNoError(err, "registering mutating webhook config %s with namespace %s", mutatingWebhookConfigName, namespace)
+    framework.ExpectNoError(err, "registering mutating webhook config %s with namespace %s", configName, namespace)
 
     // The webhook configuration is honored in 1s.
     time.Sleep(10 * time.Second)
+
+    return func() { client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(configName, nil) }
 }
 
 func testMutatingConfigMapWebhook(f *framework.Framework) {
     By("create a configmap that should be updated by the webhook")
     client := f.ClientSet
@@ -437,6 +458,80 @@ func testMutatingConfigMapWebhook(f *framework.Framework) {
     }
 }
 
+func registerMutatingWebhookForPod(f *framework.Framework, context *certContext) func() {
+    client := f.ClientSet
+    By("Registering the mutating pod webhook via the AdmissionRegistration API")
+
+    namespace := f.Namespace.Name
+    configName := podMutatingWebhookConfigName
+
+    _, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: configName,
+        },
+        Webhooks: []v1beta1.Webhook{
+            {
+                Name: "adding-init-container.k8s.io",
+                Rules: []v1beta1.RuleWithOperations{{
+                    Operations: []v1beta1.OperationType{v1beta1.Create},
+                    Rule: v1beta1.Rule{
+                        APIGroups:   []string{""},
+                        APIVersions: []string{"v1"},
+                        Resources:   []string{"pods"},
+                    },
+                }},
+                ClientConfig: v1beta1.WebhookClientConfig{
+                    Service: &v1beta1.ServiceReference{
+                        Namespace: namespace,
+                        Name:      serviceName,
+                        Path:      strPtr("/mutating-pods"),
+                    },
+                    CABundle: context.signingCert,
+                },
+            },
+        },
+    })
+    framework.ExpectNoError(err, "registering mutating webhook config %s with namespace %s", configName, namespace)
+
+    // The webhook configuration is honored in 1s.
+    time.Sleep(10 * time.Second)
+
+    return func() { client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(configName, nil) }
+}
+
+func testMutatingPodWebhook(f *framework.Framework) {
+    By("create a pod that should be updated by the webhook")
+    client := f.ClientSet
+    configMap := toBeMutatedPod(f)
+    mutatedPod, err := client.CoreV1().Pods(f.Namespace.Name).Create(configMap)
+    Expect(err).To(BeNil())
+    if len(mutatedPod.Spec.InitContainers) != 1 {
+        framework.Failf("expect pod to have 1 init container, got %#v", mutatedPod.Spec.InitContainers)
+    }
+    if got, expected := mutatedPod.Spec.InitContainers[0].Name, "webhook-added-init-container"; got != expected {
+        framework.Failf("expect the init container name to be %q, got %q", expected, got)
+    }
+    if got, expected := mutatedPod.Spec.InitContainers[0].TerminationMessagePolicy, v1.TerminationMessageReadFile; got != expected {
+        framework.Failf("expect the init terminationMessagePolicy to be default to %q, got %q", expected, got)
+    }
+}
+
+func toBeMutatedPod(f *framework.Framework) *v1.Pod {
+    return &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "webhook-to-be-mutated",
+        },
+        Spec: v1.PodSpec{
+            Containers: []v1.Container{
+                {
+                    Name:  "example",
+                    Image: framework.GetPauseImageName(f.ClientSet),
+                },
+            },
+        },
+    }
+}
+
 func testWebhook(f *framework.Framework) {
     By("create a pod that should be denied by the webhook")
     client := f.ClientSet
@@ -453,6 +548,17 @@ func testWebhook(f *framework.Framework) {
         framework.Failf("expect error contains %q, got %q", expectedErrMsg2, err.Error())
     }
 
+    By("create a pod that causes the webhook to hang")
+    client = f.ClientSet
+    // Creating the pod, the request should be rejected
+    pod = hangingPod(f)
+    _, err = client.CoreV1().Pods(f.Namespace.Name).Create(pod)
+    Expect(err).NotTo(BeNil())
+    expectedTimeoutErr := "request did not complete within allowed duration"
+    if !strings.Contains(err.Error(), expectedTimeoutErr) {
+        framework.Failf("expect timeout error %q, got %q", expectedTimeoutErr, err.Error())
+    }
+
     By("create a configmap that should be denied by the webhook")
     // Creating the configmap, the request should be rejected
     configmap := nonCompliantConfigMap(f)
@@ -539,11 +645,12 @@ func failingWebhook(namespace, name string) v1beta1.Webhook {
     }
 }
 
-func registerFailClosedWebhook(f *framework.Framework, context *certContext) {
+func registerFailClosedWebhook(f *framework.Framework, context *certContext) func() {
     client := f.ClientSet
     By("Registering a webhook that server cannot talk to, with fail closed policy, via the AdmissionRegistration API")
 
     namespace := f.Namespace.Name
+    configName := webhookFailClosedConfigName
     // A webhook that cannot talk to server, with fail-closed policy
     policyFail := v1beta1.Fail
     hook := failingWebhook(namespace, "fail-closed.k8s.io")
@@ -560,7 +667,7 @@ func registerFailClosedWebhook(f *framework.Framework, context *certContext) {
 
     _, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
         ObjectMeta: metav1.ObjectMeta{
-            Name: webhookFailClosedConfigName,
+            Name: configName,
         },
         Webhooks: []v1beta1.Webhook{
             // Server cannot talk to this webhook, so it always fails.
@@ -568,10 +675,13 @@ func registerFailClosedWebhook(f *framework.Framework, context *certContext) {
             hook,
         },
     })
-    framework.ExpectNoError(err, "registering webhook config %s with namespace %s", webhookFailClosedConfigName, namespace)
+    framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
 
     // The webhook configuration is honored in 10s.
     time.Sleep(10 * time.Second)
+    return func() {
+        f.ClientSet.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(configName, nil)
+    }
 }
 
 func testFailClosedWebhook(f *framework.Framework) {
@@ -631,6 +741,25 @@ func nonCompliantPod(f *framework.Framework) *v1.Pod {
     }
 }
 
+func hangingPod(f *framework.Framework) *v1.Pod {
+    return &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: hangingPodName,
+            Labels: map[string]string{
+                "webhook-e2e-test": "wait-forever",
+            },
+        },
+        Spec: v1.PodSpec{
+            Containers: []v1.Container{
+                {
+                    Name:  "wait-forever",
+                    Image: framework.GetPauseImageName(f.ClientSet),
+                },
+            },
+        },
+    }
+}
+
 func nonCompliantConfigMap(f *framework.Framework) *v1.ConfigMap {
     return &v1.ConfigMap{
         ObjectMeta: metav1.ObjectMeta{
@@ -686,64 +815,15 @@ func cleanWebhookTest(client clientset.Interface, namespaceName string) {
     _ = client.RbacV1beta1().RoleBindings("kube-system").Delete(roleBindingName, nil)
 }
 
-// newCRDForAdmissionWebhookTest generates a CRD
-func newCRDForAdmissionWebhookTest() *apiextensionsv1beta1.CustomResourceDefinition {
-    return &apiextensionsv1beta1.CustomResourceDefinition{
-        ObjectMeta: metav1.ObjectMeta{Name: crdName + "s." + crdAPIGroup},
-        Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
-            Group:   crdAPIGroup,
-            Version: crdAPIVersion,
-            Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
-                Plural:   crdName + "s",
-                Singular: crdName,
-                Kind:     crdKind,
-                ListKind: crdName + "List",
-            },
-            Scope: apiextensionsv1beta1.NamespaceScoped,
-        },
-    }
-}
-
-func createCRD(f *framework.Framework) (func(), dynamic.ResourceInterface) {
-    config, err := framework.LoadConfig()
-    if err != nil {
-        framework.Failf("failed to load config: %v", err)
-    }
-
-    apiExtensionClient, err := crdclientset.NewForConfig(config)
-    if err != nil {
-        framework.Failf("failed to initialize apiExtensionClient: %v", err)
-    }
-
-    crd := newCRDForAdmissionWebhookTest()
-
-    //create CRD and waits for the resource to be recognized and available.
-    dynamicClient, err := testserver.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient, f.ClientPool)
-    if err != nil {
-        framework.Failf("failed to create CustomResourceDefinition: %v", err)
-    }
-
-    resourceClient := dynamicClient.Resource(&metav1.APIResource{
-        Name:       crd.Spec.Names.Plural,
-        Namespaced: true,
-    }, f.Namespace.Name)
-
-    return func() {
-        err = testserver.DeleteCustomResourceDefinition(crd, apiExtensionClient)
-        if err != nil {
-            framework.Failf("failed to delete CustomResourceDefinition: %v", err)
-        }
-    }, resourceClient
-}
-
-func registerWebhookForCRD(f *framework.Framework, context *certContext) {
+func registerWebhookForCRD(f *framework.Framework, context *certContext, testcrd *framework.TestCrd) func() {
     client := f.ClientSet
     By("Registering the crd webhook via the AdmissionRegistration API")
 
     namespace := f.Namespace.Name
+    configName := crdWebhookConfigName
     _, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
         ObjectMeta: metav1.ObjectMeta{
-            Name: crdWebhookConfigName,
+            Name: configName,
         },
         Webhooks: []v1beta1.Webhook{
             {
@@ -751,9 +831,9 @@ func registerWebhookForCRD(f *framework.Framework, context *certContext) {
                 Rules: []v1beta1.RuleWithOperations{{
                     Operations: []v1beta1.OperationType{v1beta1.Create},
                     Rule: v1beta1.Rule{
-                        APIGroups:   []string{crdAPIGroup},
-                        APIVersions: []string{crdAPIVersion},
-                        Resources:   []string{crdName + "s"},
+                        APIGroups:   []string{testcrd.ApiGroup},
+                        APIVersions: []string{testcrd.ApiVersion},
+                        Resources:   []string{testcrd.GetPluralName()},
                     },
                 }},
                 ClientConfig: v1beta1.WebhookClientConfig{
@@ -767,20 +847,24 @@ func registerWebhookForCRD(f *framework.Framework, context *certContext) {
             },
         },
     })
-    framework.ExpectNoError(err, "registering crd webhook config %s with namespace %s", webhookConfigName, namespace)
+    framework.ExpectNoError(err, "registering crd webhook config %s with namespace %s", configName, namespace)
 
     // The webhook configuration is honored in 1s.
     time.Sleep(10 * time.Second)
+    return func() {
+        client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(configName, nil)
+    }
 }
 
-func registerMutatingWebhookForCRD(f *framework.Framework, context *certContext) {
+func registerMutatingWebhookForCRD(f *framework.Framework, context *certContext, testcrd *framework.TestCrd) func() {
     client := f.ClientSet
     By("Registering the mutating webhook for crd via the AdmissionRegistration API")
 
     namespace := f.Namespace.Name
+    configName := crdMutatingWebhookConfigName
     _, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
         ObjectMeta: metav1.ObjectMeta{
-            Name: crdMutatingWebhookConfigName,
+            Name: configName,
         },
         Webhooks: []v1beta1.Webhook{
             {
@@ -788,9 +872,9 @@ func registerMutatingWebhookForCRD(f *framework.Framework, context *certContext)
                 Rules: []v1beta1.RuleWithOperations{{
                     Operations: []v1beta1.OperationType{v1beta1.Create},
                     Rule: v1beta1.Rule{
-                        APIGroups:   []string{crdAPIGroup},
-                        APIVersions: []string{crdAPIVersion},
-                        Resources:   []string{crdName + "s"},
+                        APIGroups:   []string{testcrd.ApiGroup},
+                        APIVersions: []string{testcrd.ApiVersion},
+                        Resources:   []string{testcrd.GetPluralName()},
                     },
                 }},
                 ClientConfig: v1beta1.WebhookClientConfig{
@@ -807,9 +891,9 @@ func registerMutatingWebhookForCRD(f *framework.Framework, context *certContext)
                 Rules: []v1beta1.RuleWithOperations{{
                     Operations: []v1beta1.OperationType{v1beta1.Create},
                     Rule: v1beta1.Rule{
-                        APIGroups:   []string{crdAPIGroup},
-                        APIVersions: []string{crdAPIVersion},
-                        Resources:   []string{crdName + "s"},
+                        APIGroups:   []string{testcrd.ApiGroup},
+                        APIVersions: []string{testcrd.ApiVersion},
+                        Resources:   []string{testcrd.GetPluralName()},
                     },
                 }},
                 ClientConfig: v1beta1.WebhookClientConfig{
@@ -823,15 +907,16 @@ func registerMutatingWebhookForCRD(f *framework.Framework, context *certContext)
             },
         },
     })
-    framework.ExpectNoError(err, "registering crd webhook config %s with namespace %s", crdMutatingWebhookConfigName, namespace)
+    framework.ExpectNoError(err, "registering crd webhook config %s with namespace %s", configName, namespace)
 
     // The webhook configuration is honored in 1s.
     time.Sleep(10 * time.Second)
+
+    return func() { client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(configName, nil) }
 }
 
-func testCRDWebhook(f *framework.Framework, crdClient dynamic.ResourceInterface) {
+func testCRDWebhook(f *framework.Framework, crd *apiextensionsv1beta1.CustomResourceDefinition, crdClient dynamic.ResourceInterface) {
     By("Creating a custom resource that should be denied by the webhook")
-    crd := newCRDForAdmissionWebhookTest()
     crInstance := &unstructured.Unstructured{
         Object: map[string]interface{}{
             "kind":       crd.Spec.Names.Kind,
@@ -853,9 +938,8 @@ func testCRDWebhook(f *framework.Framework, crdClient dynamic.ResourceInterface)
     }
 }
 
-func testMutatingCRDWebhook(f *framework.Framework, crdClient dynamic.ResourceInterface) {
+func testMutatingCRDWebhook(f *framework.Framework, crd *apiextensionsv1beta1.CustomResourceDefinition, crdClient dynamic.ResourceInterface) {
     By("Creating a custom resource that should be mutated by the webhook")
-    crd := newCRDForAdmissionWebhookTest()
     cr := &unstructured.Unstructured{
         Object: map[string]interface{}{
             "kind":       crd.Spec.Names.Kind,
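A recurring refactor in webhook.go above: each register* helper now returns a cleanup closure over its own configName, so callers write defer webhookCleanup() instead of repeating Delete calls with hard-coded configuration names. A generic sketch of the pattern, separate from the diff (all names hypothetical):

package webhooksketch

// registerThenCleanup creates a named resource and returns a closure that
// deletes exactly what was created, mirroring the register*/cleanup pairing
// this commit introduces.
func registerThenCleanup(name string, create func(string) error, remove func(string) error) (func(), error) {
    if err := create(name); err != nil {
        return nil, err
    }
    return func() {
        // Best-effort delete; the e2e helpers likewise ignore the error.
        _ = remove(name)
    }, nil
}

// Usage:
//   cleanup, err := registerThenCleanup("e2e-test-webhook-config", createFn, deleteFn)
//   if err != nil { /* fail the test */ }
//   defer cleanup()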
vendor/k8s.io/kubernetes/test/e2e/apps/BUILD (generated, vendored): 8 changed lines
@@ -33,13 +33,13 @@ go_library(
         "//pkg/controller/daemon:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",
         "//pkg/controller/job:go_default_library",
-        "//pkg/controller/node:go_default_library",
+        "//pkg/controller/nodelifecycle:go_default_library",
         "//pkg/controller/replicaset:go_default_library",
         "//pkg/controller/replication:go_default_library",
         "//pkg/kubectl:go_default_library",
         "//pkg/master/ports:go_default_library",
+        "//pkg/scheduler/schedulercache:go_default_library",
         "//pkg/util/pointer:go_default_library",
-        "//plugin/pkg/scheduler/schedulercache:go_default_library",
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/utils:go_default_library",
@@ -47,14 +47,12 @@ go_library(
         "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",
-        "//vendor/k8s.io/api/apps/v1beta1:go_default_library",
-        "//vendor/k8s.io/api/apps/v1beta2:go_default_library",
+        "//vendor/k8s.io/api/apps/v1:go_default_library",
         "//vendor/k8s.io/api/batch/v1:go_default_library",
         "//vendor/k8s.io/api/batch/v1beta1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
-        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
vendor/k8s.io/kubernetes/test/e2e/apps/cronjob.go (generated, vendored): 4 changed lines
@@ -222,7 +222,7 @@ var _ = SIGDescribe("CronJob", func() {
         err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, true)
         Expect(err).NotTo(HaveOccurred())
 
-        By("Ensuring MissingJob event has occured")
+        By("Ensuring MissingJob event has occurred")
         err = checkNoEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"})
         Expect(err).To(HaveOccurred())
 
@@ -430,7 +430,7 @@ func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
     })
 }
 
-// checkNoEventWithReason checks no events with a reason within a list has occured
+// checkNoEventWithReason checks no events with a reason within a list has occurred
 func checkNoEventWithReason(c clientset.Interface, ns, cronJobName string, reasons []string) error {
     sj, err := c.BatchV1beta1().CronJobs(ns).Get(cronJobName, metav1.GetOptions{})
     if err != nil {
vendor/k8s.io/kubernetes/test/e2e/apps/daemon_restart.go (generated, vendored): 8 changed lines
@@ -249,7 +249,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
         // Requires master ssh access.
         framework.SkipUnlessProviderIs("gce", "aws")
         restarter := NewRestartConfig(
-            framework.GetMasterHost(), "kube-controller", ports.ControllerManagerPort, restartPollInterval, restartTimeout)
+            framework.GetMasterHost(), "kube-controller", ports.InsecureKubeControllerManagerPort, restartPollInterval, restartTimeout)
         restarter.restart()
 
         // The intent is to ensure the replication controller manager has observed and reported status of
@@ -257,7 +257,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
         // that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
         // to the same size achieves this, because the scale operation advances the RC's sequence number
         // and awaits it to be observed and reported back in the RC's status.
-        framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods, true)
+        framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods, true)
 
         // Only check the keys, the pods can be different if the kubelet updated it.
         // TODO: Can it really?
@@ -288,9 +288,9 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
         restarter.kill()
         // This is best effort to try and create pods while the scheduler is down,
         // since we don't know exactly when it is restarted after the kill signal.
-        framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods+5, false))
+        framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods+5, false))
         restarter.waitUp()
-        framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods+5, true))
+        framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods+5, true))
     })
 
     It("Kubelet should not restart containers across restart", func() {
vendor/k8s.io/kubernetes/test/e2e/apps/daemon_set.go (generated, vendored): 196 changed lines
@@ -22,10 +22,8 @@ import (
     "strings"
     "time"
 
-    apps "k8s.io/api/apps/v1beta1"
+    apps "k8s.io/api/apps/v1"
     "k8s.io/api/core/v1"
-    extensions "k8s.io/api/extensions/v1beta1"
     apiequality "k8s.io/apimachinery/pkg/api/equality"
     apierrs "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
@@ -39,7 +37,7 @@ import (
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/daemon"
     "k8s.io/kubernetes/pkg/kubectl"
-    "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
+    "k8s.io/kubernetes/pkg/scheduler/schedulercache"
     "k8s.io/kubernetes/test/e2e/framework"
 
     . "github.com/onsi/ginkgo"
@@ -67,7 +65,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 
     AfterEach(func() {
         // Clean up
-        daemonsets, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{})
+        daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{})
         Expect(err).NotTo(HaveOccurred(), "unable to dump DaemonSets")
         if daemonsets != nil && len(daemonsets.Items) > 0 {
             for _, ds := range daemonsets.Items {
@@ -80,7 +78,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
                 Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
             }
         }
-        if daemonsets, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
+        if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
             framework.Logf("daemonset: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), daemonsets))
         } else {
             framework.Logf("unable to dump daemonsets: %v", err)
@@ -114,7 +112,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
         label := map[string]string{daemonsetNameLabel: dsName}
 
         By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
-        ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
+        ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
         Expect(err).NotTo(HaveOccurred())
 
         By("Check that daemon pods launch on every node of the cluster.")
@@ -138,7 +136,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
         framework.Logf("Creating daemon %q with a node selector", dsName)
         ds := newDaemonSet(dsName, image, complexLabel)
         ds.Spec.Template.Spec.NodeSelector = nodeSelector
-        ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
+        ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
         Expect(err).NotTo(HaveOccurred())
 
         By("Initially, daemon pods should not be running on any nodes.")
@@ -167,7 +165,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
         By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate")
         patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`,
             daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel])
-        ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
+        ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
         Expect(err).NotTo(HaveOccurred(), "error patching daemon set")
         daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels)
         Expect(len(daemonSetLabels)).To(Equal(1))
@@ -199,7 +197,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
             },
         },
         }
-        ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
+        ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
         Expect(err).NotTo(HaveOccurred())
 
         By("Initially, daemon pods should not be running on any nodes.")
@@ -229,7 +227,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
         label := map[string]string{daemonsetNameLabel: dsName}
 
         By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
-        ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
+        ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
         Expect(err).NotTo(HaveOccurred())
 
         By("Check that daemon pods launch on every node of the cluster.")
@@ -253,54 +251,43 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
         label := map[string]string{daemonsetNameLabel: dsName}
 
         framework.Logf("Creating simple daemon set %s", dsName)
-        ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
+        ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
         Expect(err).NotTo(HaveOccurred())
-        Expect(ds.Spec.TemplateGeneration).To(Equal(int64(1)))
 
         By("Check that daemon pods launch on every node of the cluster.")
         err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
         Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
 
-        By("Make sure all daemon pods have correct template generation 1")
-        templateGeneration := "1"
-        err = checkDaemonPodsTemplateGeneration(c, ns, label, "1")
-        Expect(err).NotTo(HaveOccurred())
-
         // Check history and labels
-        ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
+        ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
         Expect(err).NotTo(HaveOccurred())
         waitForHistoryCreated(c, ns, label, 1)
         first := curHistory(listDaemonHistories(c, ns, label), ds)
-        firstHash := first.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
+        firstHash := first.Labels[apps.DefaultDaemonSetUniqueLabelKey]
         Expect(first.Revision).To(Equal(int64(1)))
-        checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash, templateGeneration)
+        checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash)
 
         By("Update daemon pods image.")
         patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
-        ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
+        ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
         Expect(err).NotTo(HaveOccurred())
-        Expect(ds.Spec.TemplateGeneration).To(Equal(int64(2)))
 
         By("Check that daemon pods images aren't updated.")
         err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0))
         Expect(err).NotTo(HaveOccurred())
 
-        By("Make sure all daemon pods have correct template generation 1")
-        err = checkDaemonPodsTemplateGeneration(c, ns, label, templateGeneration)
-        Expect(err).NotTo(HaveOccurred())
-
         By("Check that daemon pods are still running on every node of the cluster.")
         err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
         Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
 
         // Check history and labels
-        ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
+        ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
         Expect(err).NotTo(HaveOccurred())
         waitForHistoryCreated(c, ns, label, 2)
         cur := curHistory(listDaemonHistories(c, ns, label), ds)
         Expect(cur.Revision).To(Equal(int64(2)))
-        Expect(cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]).NotTo(Equal(firstHash))
-        checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash, templateGeneration)
+        Expect(cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]).NotTo(Equal(firstHash))
+        checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash)
     })
 
     It("Should update pod when spec was updated and update strategy is RollingUpdate", func() {
@@ -309,11 +296,9 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
         templateGeneration := int64(999)
         framework.Logf("Creating simple daemon set %s with templateGeneration %d", dsName, templateGeneration)
         ds := newDaemonSet(dsName, image, label)
-        ds.Spec.TemplateGeneration = templateGeneration
-        ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
-        ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
|
||||
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
||||
|
||||
By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
@ -324,20 +309,19 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
waitForHistoryCreated(c, ns, label, 1)
|
||||
cur := curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
hash := cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
|
||||
hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
|
||||
Expect(cur.Revision).To(Equal(int64(1)))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash, fmt.Sprint(templateGeneration))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash)
|
||||
|
||||
By("Update daemon pods image.")
|
||||
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
|
||||
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
templateGeneration++
|
||||
Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
||||
|
||||
By("Check that daemon pods images are updated.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1))
|
||||
@ -352,90 +336,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
waitForHistoryCreated(c, ns, label, 2)
|
||||
cur = curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
hash = cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
|
||||
hash = cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
|
||||
Expect(cur.Revision).To(Equal(int64(2)))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash, fmt.Sprint(templateGeneration))
|
||||
})
|
||||
|
||||
It("Should adopt existing pods when creating a RollingUpdate DaemonSet regardless of templateGeneration", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
// 1. Create a RollingUpdate DaemonSet
|
||||
templateGeneration := int64(999)
|
||||
framework.Logf("Creating simple RollingUpdate DaemonSet %s with templateGeneration %d", dsName, templateGeneration)
|
||||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.TemplateGeneration = templateGeneration
|
||||
ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
|
||||
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
||||
|
||||
framework.Logf("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
framework.Logf("Make sure all daemon pods have correct template generation %d", templateGeneration)
|
||||
err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// 2. Orphan DaemonSet pods
|
||||
framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", dsName)
|
||||
deleteDaemonSetAndOrphan(c, ds)
|
||||
|
||||
// 3. Adopt DaemonSet pods (no restart)
|
||||
newDSName := "adopt"
|
||||
framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newDSName)
|
||||
newDS := newDaemonSet(newDSName, image, label)
|
||||
newDS.Spec.TemplateGeneration = templateGeneration
|
||||
newDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
|
||||
newDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newDS)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(newDS.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
||||
Expect(apiequality.Semantic.DeepEqual(newDS.Spec.Template, ds.Spec.Template)).To(BeTrue(), "DaemonSet template should match to adopt pods")
|
||||
|
||||
framework.Logf("Wait for pods and history to be adopted by DaemonSet %s", newDS.Name)
|
||||
waitDaemonSetAdoption(c, newDS, ds.Name, templateGeneration)
|
||||
|
||||
// 4. Orphan DaemonSet pods again
|
||||
framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", newDSName)
|
||||
deleteDaemonSetAndOrphan(c, newDS)
|
||||
|
||||
// 5. Adopt DaemonSet pods (no restart) as long as template matches, even when templateGeneration doesn't match
|
||||
newAdoptDSName := "adopt-template-matches"
|
||||
framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newAdoptDSName)
|
||||
newAdoptDS := newDaemonSet(newAdoptDSName, image, label)
|
||||
newAdoptDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
|
||||
newAdoptDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newAdoptDS)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(newAdoptDS.Spec.TemplateGeneration).To(Equal(int64(1)))
|
||||
Expect(newAdoptDS.Spec.TemplateGeneration).NotTo(Equal(templateGeneration))
|
||||
Expect(apiequality.Semantic.DeepEqual(newAdoptDS.Spec.Template, newDS.Spec.Template)).To(BeTrue(), "DaemonSet template should match to adopt pods")
|
||||
|
||||
framework.Logf(fmt.Sprintf("Wait for pods and history to be adopted by DaemonSet %s", newAdoptDS.Name))
|
||||
waitDaemonSetAdoption(c, newAdoptDS, ds.Name, templateGeneration)
|
||||
|
||||
// 6. Orphan DaemonSet pods again
|
||||
framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", newAdoptDSName)
|
||||
deleteDaemonSetAndOrphan(c, newAdoptDS)
|
||||
|
||||
// 7. Adopt DaemonSet pods (no restart) as long as templateGeneration matches, even when template doesn't match
|
||||
newAdoptDSName = "adopt-template-generation-matches"
|
||||
framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newAdoptDSName)
|
||||
newAdoptDS = newDaemonSet(newAdoptDSName, image, label)
|
||||
newAdoptDS.Spec.Template.Spec.Containers[0].Name = "not-match"
|
||||
newAdoptDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
|
||||
newAdoptDS.Spec.TemplateGeneration = templateGeneration
|
||||
newAdoptDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newAdoptDS)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(newAdoptDS.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
||||
Expect(apiequality.Semantic.DeepEqual(newAdoptDS.Spec.Template, newDS.Spec.Template)).NotTo(BeTrue(), "DaemonSet template should not match")
|
||||
|
||||
framework.Logf("Wait for pods and history to be adopted by DaemonSet %s", newAdoptDS.Name)
|
||||
waitDaemonSetAdoption(c, newAdoptDS, ds.Name, templateGeneration)
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash)
|
||||
})
|
||||
|
||||
It("Should rollback without unnecessary restarts", func() {
|
||||
@ -445,8 +352,8 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
framework.Logf("Create a RollingUpdate DaemonSet")
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
|
||||
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
|
||||
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Check that daemon pods launch on every node of the cluster")
|
||||
@ -456,7 +363,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
framework.Logf("Update the DaemonSet to trigger a rollout")
|
||||
// We use a nonexistent image here, so that we make sure it won't finish
|
||||
newImage := "foo:non-existent"
|
||||
newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *extensions.DaemonSet) {
|
||||
newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = newImage
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
@ -483,7 +390,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
Expect(len(newPods)).NotTo(Equal(0))
|
||||
|
||||
framework.Logf("Roll back the DaemonSet before rollout is complete")
|
||||
rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *extensions.DaemonSet) {
|
||||
rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = image
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
@ -511,11 +418,11 @@ func getDaemonSetImagePatch(containerName, containerImage string) string {
|
||||
|
||||
// deleteDaemonSetAndOrphan deletes the given DaemonSet and orphans all its dependents.
|
||||
// It also checks that all dependents are orphaned, and the DaemonSet is deleted.
|
||||
func deleteDaemonSetAndOrphan(c clientset.Interface, ds *extensions.DaemonSet) {
|
||||
func deleteDaemonSetAndOrphan(c clientset.Interface, ds *apps.DaemonSet) {
|
||||
trueVar := true
|
||||
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
|
||||
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(ds.UID))
|
||||
err := c.ExtensionsV1beta1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions)
|
||||
err := c.AppsV1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ds.Namespace, ds.Spec.Template.Labels))
|
||||
@ -526,12 +433,12 @@ func deleteDaemonSetAndOrphan(c clientset.Interface, ds *extensions.DaemonSet) {
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet to be deleted")
|
||||
}
|
||||
|
||||
func newDaemonSet(dsName, image string, label map[string]string) *extensions.DaemonSet {
|
||||
return &extensions.DaemonSet{
|
||||
func newDaemonSet(dsName, image string, label map[string]string) *apps.DaemonSet {
|
||||
return &apps.DaemonSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: dsName,
|
||||
},
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: label,
|
||||
@ -623,7 +530,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
|
||||
return newNode, nil
|
||||
}
|
||||
|
||||
func checkDaemonPodOnNodes(f *framework.Framework, ds *extensions.DaemonSet, nodeNames []string) func() (bool, error) {
|
||||
func checkDaemonPodOnNodes(f *framework.Framework, ds *apps.DaemonSet, nodeNames []string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
@ -662,14 +569,14 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *extensions.DaemonSet, nod
|
||||
}
|
||||
}
|
||||
|
||||
func checkRunningOnAllNodes(f *framework.Framework, ds *extensions.DaemonSet) func() (bool, error) {
|
||||
func checkRunningOnAllNodes(f *framework.Framework, ds *apps.DaemonSet) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
nodeNames := schedulableNodes(f.ClientSet, ds)
|
||||
return checkDaemonPodOnNodes(f, ds, nodeNames)()
|
||||
}
|
||||
}
|
||||
|
||||
func schedulableNodes(c clientset.Interface, ds *extensions.DaemonSet) []string {
|
||||
func schedulableNodes(c clientset.Interface, ds *apps.DaemonSet) []string {
|
||||
nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
nodeNames := make([]string, 0)
|
||||
@ -696,7 +603,7 @@ func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]st
|
||||
}
|
||||
|
||||
// canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node
|
||||
func canScheduleOnNode(node v1.Node, ds *extensions.DaemonSet) bool {
|
||||
func canScheduleOnNode(node v1.Node, ds *apps.DaemonSet) bool {
|
||||
newPod := daemon.NewPod(ds, node.Name)
|
||||
nodeInfo := schedulercache.NewNodeInfo()
|
||||
nodeInfo.SetNode(&node)
|
||||
@ -708,12 +615,12 @@ func canScheduleOnNode(node v1.Node, ds *extensions.DaemonSet) bool {
|
||||
return fit
|
||||
}
|
||||
|
||||
func checkRunningOnNoNodes(f *framework.Framework, ds *extensions.DaemonSet) func() (bool, error) {
|
||||
func checkRunningOnNoNodes(f *framework.Framework, ds *apps.DaemonSet) func() (bool, error) {
|
||||
return checkDaemonPodOnNodes(f, ds, make([]string, 0))
|
||||
}
|
||||
|
||||
func checkDaemonStatus(f *framework.Framework, dsName string) error {
|
||||
ds, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{})
|
||||
ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not get daemon set from v1.")
|
||||
}
|
||||
@ -724,7 +631,7 @@ func checkDaemonStatus(f *framework.Framework, dsName string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *extensions.DaemonSet, image string, maxUnavailable int) func() (bool, error) {
|
||||
func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonSet, image string, maxUnavailable int) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
podList, err := c.CoreV1().Pods(ds.Namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
@ -770,7 +677,7 @@ func checkDaemonPodsTemplateGeneration(c clientset.Interface, ns string, label m
|
||||
if !controller.IsPodActive(&pod) {
|
||||
continue
|
||||
}
|
||||
podTemplateGeneration := pod.Labels[extensions.DaemonSetTemplateGenerationKey]
|
||||
podTemplateGeneration := pod.Labels[apps.DeprecatedTemplateGeneration]
|
||||
if podTemplateGeneration != templateGeneration {
|
||||
return fmt.Errorf("expected pod %s/%s template generation %s, but got %s", pod.Namespace, pod.Name, templateGeneration, podTemplateGeneration)
|
||||
}
|
||||
@ -780,7 +687,7 @@ func checkDaemonPodsTemplateGeneration(c clientset.Interface, ns string, label m
|
||||
|
||||
func checkDaemonSetDeleted(c clientset.Interface, ns, name string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
_, err := c.ExtensionsV1beta1().DaemonSets(ns).Get(name, metav1.GetOptions{})
|
||||
_, err := c.AppsV1().DaemonSets(ns).Get(name, metav1.GetOptions{})
|
||||
if !apierrs.IsNotFound(err) {
|
||||
return false, err
|
||||
}
|
||||
@ -840,7 +747,7 @@ func checkDaemonSetHistoryAdopted(c clientset.Interface, ns string, dsUID types.
|
||||
}
|
||||
}
|
||||
|
||||
func waitDaemonSetAdoption(c clientset.Interface, ds *extensions.DaemonSet, podPrefix string, podTemplateGeneration int64) {
|
||||
func waitDaemonSetAdoption(c clientset.Interface, ds *apps.DaemonSet, podPrefix string, podTemplateGeneration int64) {
|
||||
ns := ds.Namespace
|
||||
label := ds.Spec.Template.Labels
|
||||
|
||||
@ -868,16 +775,13 @@ func checkDaemonSetPodsName(c clientset.Interface, ns, prefix string, label map[
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkDaemonSetPodsLabels(podList *v1.PodList, hash, templateGeneration string) {
|
||||
func checkDaemonSetPodsLabels(podList *v1.PodList, hash string) {
|
||||
for _, pod := range podList.Items {
|
||||
podHash := pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
|
||||
podTemplate := pod.Labels[extensions.DaemonSetTemplateGenerationKey]
|
||||
podHash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey]
|
||||
Expect(len(podHash)).To(BeNumerically(">", 0))
|
||||
if len(hash) > 0 {
|
||||
Expect(podHash).To(Equal(hash))
|
||||
}
|
||||
Expect(len(podTemplate)).To(BeNumerically(">", 0))
|
||||
Expect(podTemplate).To(Equal(templateGeneration))
|
||||
}
|
||||
}
|
||||
|
||||
@ -902,19 +806,19 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st
|
||||
func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *apps.ControllerRevisionList {
|
||||
selector := labels.Set(label).AsSelector()
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
historyList, err := c.AppsV1beta1().ControllerRevisions(ns).List(options)
|
||||
historyList, err := c.AppsV1().ControllerRevisions(ns).List(options)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(historyList.Items)).To(BeNumerically(">", 0))
|
||||
return historyList
|
||||
}
|
||||
|
||||
func curHistory(historyList *apps.ControllerRevisionList, ds *extensions.DaemonSet) *apps.ControllerRevision {
|
||||
func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *apps.ControllerRevision {
|
||||
var curHistory *apps.ControllerRevision
|
||||
foundCurHistories := 0
|
||||
for i := range historyList.Items {
|
||||
history := &historyList.Items[i]
|
||||
// Every history should have the hash label
|
||||
Expect(len(history.Labels[extensions.DefaultDaemonSetUniqueLabelKey])).To(BeNumerically(">", 0))
|
||||
Expect(len(history.Labels[apps.DefaultDaemonSetUniqueLabelKey])).To(BeNumerically(">", 0))
|
||||
match, err := daemon.Match(ds, history)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
if match {
|
||||
|
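Note on the pattern above: every hunk in this file swaps the deprecated extensions/v1beta1 DaemonSet client for the apps/v1 one; the verbs and option types stay the same, only the API group accessor changes. Below is a minimal, self-contained sketch of the new call shape. It is not part of the patch; the default kubeconfig path and the kube-system/kube-proxy DaemonSet are illustrative assumptions.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: a reachable cluster via the default kubeconfig.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	c, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Before the patch: c.ExtensionsV1beta1().DaemonSets(ns).Get(name, metav1.GetOptions{})
	// After the patch: the same verb on the apps/v1 group.
	ds, err := c.AppsV1().DaemonSets("kube-system").Get("kube-proxy", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s: %d/%d pods ready\n", ds.Name, ds.Status.NumberReady, ds.Status.DesiredNumberScheduled)
}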
143
vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go
generated
vendored
143
vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go
generated
vendored
@ -92,6 +92,9 @@ var _ = SIGDescribe("Deployment", func() {
It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() {
testDeploymentsControllerRef(f)
})
It("deployment should support proportional scaling", func() {
testProportionalScalingDeployment(f)
})
// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues
// See https://github.com/kubernetes/kubernetes/issues/29229
})
@ -423,7 +426,7 @@ func testRolloverDeployment(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())
framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
err = framework.WaitForDeploymentUpdatedReplicasLTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
err = framework.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
// Check if it's updated to revision 1 correctly
framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
@ -760,11 +763,18 @@ func testDeploymentsControllerRef(f *framework.Framework) {
err = framework.WaitForDeploymentComplete(c, deploy)
Expect(err).NotTo(HaveOccurred())

framework.Logf("Checking its ReplicaSet has the right controllerRef")
framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
rsList := listDeploymentReplicaSets(c, ns, podLabels)
Expect(len(rsList.Items)).Should(Equal(1))

framework.Logf("Obtaining the ReplicaSet's UID")
orphanedRSUID := rsList.Items[0].UID

framework.Logf("Checking the ReplicaSet has the right controllerRef")
err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
Expect(err).NotTo(HaveOccurred())

framework.Logf("Deleting Deployment %q and orphaning its ReplicaSets", deploymentName)
framework.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName)
err = orphanDeploymentReplicaSets(c, deploy)
Expect(err).NotTo(HaveOccurred())

@ -783,6 +793,133 @@ func testDeploymentsControllerRef(f *framework.Framework) {
framework.Logf("Waiting for the ReplicaSet to have the right controllerRef")
err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
Expect(err).NotTo(HaveOccurred())

framework.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName)
rsList = listDeploymentReplicaSets(c, ns, podLabels)
Expect(len(rsList.Items)).Should(Equal(1))

framework.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet")
Expect(rsList.Items[0].UID).Should(Equal(orphanedRSUID))
}

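The adoption test above pins the ReplicaSet's identity by UID across orphaning and re-adoption. As a hedged sketch of the underlying ownership check (the e2e helper checkDeploymentReplicaSetsControllerRef itself is not shown in this hunk), apimachinery's metav1.GetControllerOf can express it directly; the function name and demo values below are illustrative, not the framework's actual implementation.

package main

import (
	"fmt"

	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// hasControllerRef mirrors the check the test performs: the ReplicaSet must
// carry a controller ownerReference pointing at the expected Deployment UID.
func hasControllerRef(rs *extensionsv1beta1.ReplicaSet, deployUID types.UID) error {
	ref := metav1.GetControllerOf(rs)
	if ref == nil {
		return fmt.Errorf("ReplicaSet %s has no controllerRef", rs.Name)
	}
	if ref.UID != deployUID {
		return fmt.Errorf("ReplicaSet %s is controlled by %s/%s, not the expected Deployment", rs.Name, ref.Kind, ref.Name)
	}
	return nil
}

func main() {
	// Illustrative only: a ReplicaSet with no ownerReferences fails the check.
	rs := &extensionsv1beta1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}
	fmt.Println(hasControllerRef(rs, types.UID("123")))
}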
// testProportionalScalingDeployment tests that when a RollingUpdate Deployment is scaled in the middle
// of a rollout (either in progress or paused), then the Deployment will balance additional replicas
// in existing active ReplicaSets (ReplicaSets with more than 0 replica) in order to mitigate risk.
func testProportionalScalingDeployment(f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet

podLabels := map[string]string{"name": NginxImageName}
replicas := int32(10)

// Create a nginx deployment.
deploymentName := "nginx-deployment"
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
d.Spec.Strategy.RollingUpdate = new(extensions.RollingUpdateDeployment)
d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)

framework.Logf("Creating deployment %q", deploymentName)
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())

framework.Logf("Waiting for observed generation %d", deployment.Generation)
Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())

// Verify that the required pods have come up.
framework.Logf("Waiting for all required pods to come up")
err = framework.VerifyPodsRunning(c, ns, NginxImageName, false, *(deployment.Spec.Replicas))
Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)

framework.Logf("Waiting for deployment %q to complete", deployment.Name)
Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())

firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
Expect(err).NotTo(HaveOccurred())

// Update the deployment with a non-existent image so that the new replica set
// will be blocked to simulate a partial rollout.
framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
})
Expect(err).NotTo(HaveOccurred())

framework.Logf("Waiting for observed generation %d", deployment.Generation)
Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())

// Checking state of first rollout's replicaset.
maxUnavailable, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, int(*(deployment.Spec.Replicas)), false)
Expect(err).NotTo(HaveOccurred())

// First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas.
minAvailableReplicas := replicas - int32(maxUnavailable)
framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas)
Expect(framework.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)).NotTo(HaveOccurred())

// First rollout's replicaset should have .spec.replicas = 8 too.
framework.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas)
Expect(framework.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)).NotTo(HaveOccurred())

// The desired replicas wait makes sure that the RS controller has created expected number of pods.
framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForReplicaSetDesiredReplicas(c.ExtensionsV1beta1(), firstRS)
Expect(err).NotTo(HaveOccurred())

// Checking state of second rollout's replicaset.
secondRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
Expect(err).NotTo(HaveOccurred())

maxSurge, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), false)
Expect(err).NotTo(HaveOccurred())

// Second rollout's replicaset should have 0 available replicas.
framework.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0")
Expect(secondRS.Status.AvailableReplicas).Should(Equal(int32(0)))

// Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas.
newReplicas := replicas + int32(maxSurge) - minAvailableReplicas
framework.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas)
Expect(framework.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)).NotTo(HaveOccurred())

// The desired replicas wait makes sure that the RS controller has created expected number of pods.
framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
secondRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForReplicaSetDesiredReplicas(c.ExtensionsV1beta1(), secondRS)
Expect(err).NotTo(HaveOccurred())

// Check the deployment's minimum availability.
framework.Logf("Verifying that deployment %q has minimum required number of available replicas", deploymentName)
if deployment.Status.AvailableReplicas < minAvailableReplicas {
Expect(fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas)).NotTo(HaveOccurred())
}

// Scale the deployment to 30 replicas.
newReplicas = int32(30)
framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
update.Spec.Replicas = &newReplicas
})
Expect(err).NotTo(HaveOccurred())

framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName)
firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
secondRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())

// First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas.
// Note that 12 comes from rounding (30-10)*(8/13) to nearest integer.
framework.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20")
Expect(framework.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20)).NotTo(HaveOccurred())

// Second rollout's replicaset should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas.
// Note that 8 comes from rounding (30-10)*(5/13) to nearest integer.
framework.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13")
Expect(framework.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13)).NotTo(HaveOccurred())
}

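The expected counts in the comments above follow from proportional scaling: each active ReplicaSet receives a share of the 20 new replicas proportional to its current size, rounded to the nearest integer. A quick standalone check of that arithmetic, under the stated assumption of simple nearest-integer rounding (the real controller also redistributes any rounding leftover, which this sketch ignores):

package main

import (
	"fmt"
	"math"
)

// proportionalShare returns how many of the extra replicas a ReplicaSet
// receives, in proportion to its current size, with nearest-integer rounding.
func proportionalShare(rsReplicas, totalReplicas, extra int) int {
	return int(math.Round(float64(extra) * float64(rsReplicas) / float64(totalReplicas)))
}

func main() {
	// Mid-rollout state from the test: the first RS has 8 pods, the second 5
	// (10 desired + maxSurge 3 = 13 total), and the Deployment is scaled
	// from 10 to 30, i.e. 20 extra replicas to distribute.
	first, second, extra := 8, 5, 20
	total := first + second
	fmt.Println(first + proportionalShare(first, total, extra))   // 20
	fmt.Println(second + proportionalShare(second, total, extra)) // 13
}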
func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error {
13
vendor/k8s.io/kubernetes/test/e2e/apps/disruption.go
generated
vendored
13
vendor/k8s.io/kubernetes/test/e2e/apps/disruption.go
generated
vendored
@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)

// schedulingTimeout is longer specifically because sometimes we need to wait
@ -64,7 +65,7 @@ var _ = SIGDescribe("DisruptionController", func() {
// Since disruptionAllowed starts out 0, if we see it ever become positive,
// that means the controller is working.
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
pdb, err := cs.Policy().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{})
pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{})
if err != nil {
return false, err
}
@ -226,7 +227,7 @@ func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable
MinAvailable: &minAvailable,
},
}
_, err := cs.Policy().PodDisruptionBudgets(ns).Create(&pdb)
_, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb)
Expect(err).NotTo(HaveOccurred())
}

@ -241,7 +242,7 @@ func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavail
MaxUnavailable: &maxUnavailable,
},
}
_, err := cs.Policy().PodDisruptionBudgets(ns).Create(&pdb)
_, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb)
Expect(err).NotTo(HaveOccurred())
}

@ -257,7 +258,7 @@ func createPodsOrDie(cs kubernetes.Interface, ns string, n int) {
Containers: []v1.Container{
{
Name: "busybox",
Image: "gcr.io/google_containers/echoserver:1.6",
Image: imageutils.GetE2EImage(imageutils.EchoServer),
},
},
RestartPolicy: v1.RestartPolicyAlways,
@ -301,7 +302,7 @@ func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) {
func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclusive bool) {
container := v1.Container{
Name: "busybox",
Image: "gcr.io/google_containers/echoserver:1.6",
Image: imageutils.GetE2EImage(imageutils.EchoServer),
}
if exclusive {
container.Ports = []v1.ContainerPort{
@ -330,6 +331,6 @@ func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclu
},
}

_, err := cs.Extensions().ReplicaSets(ns).Create(rs)
_, err := cs.ExtensionsV1beta1().ReplicaSets(ns).Create(rs)
framework.ExpectNoError(err, "Creating replica set %q in namespace %q", rs.Name, ns)
}
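The helpers above switch from the deprecated cs.Policy() group accessor to the versioned cs.PolicyV1beta1(). A hedged sketch of the object shape those helpers build follows; the budget name, selector, and function name are illustrative, not the test's exact code.

package main

import (
	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"
)

// createMinAvailablePDB mirrors what createPDBMinAvailableOrDie does: a budget
// that keeps at least minAvailable matching pods up across voluntary disruptions.
func createMinAvailablePDB(cs kubernetes.Interface, ns string, minAvailable intstr.IntOrString) error {
	pdb := policyv1beta1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: policyv1beta1.PodDisruptionBudgetSpec{
			Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
			MinAvailable: &minAvailable, // e.g. intstr.FromInt(2)
		},
	}
	// The versioned accessor the diff switches to.
	_, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb)
	return err
}

func main() {
	// Clientset wiring omitted; see the client-go sketch earlier in this section.
}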
7
vendor/k8s.io/kubernetes/test/e2e/apps/job.go
generated
vendored
7
vendor/k8s.io/kubernetes/test/e2e/apps/job.go
generated
vendored
@ -77,12 +77,15 @@ var _ = SIGDescribe("Job", func() {
// Worst case analysis: 15 failures, each taking 1 minute to
// run due to some slowness, 1 in 2^15 chance of happening,
// causing test flake. Should be very rare.
job := framework.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions, nil, 999)
// With the introduction of backoff limit and high failure rate this
// is hitting its timeout, the 3 is a reasonable value that should make this
// test less flaky, for now.
job := framework.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, 3, nil, 999)
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())

By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
Expect(err).NotTo(HaveOccurred())
})

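The worst-case comment above is just independent coin flips: with pods that succeed or fail with probability 1/2 and a budget of 15 attempts, the test only flakes if all 15 fail. A tiny standalone check of the 1-in-2^15 figure (illustrative, not part of the suite):

package main

import (
	"fmt"
	"math"
)

func main() {
	// P(pod fails) = 0.5 per attempt, attempts assumed independent.
	pAllFail := math.Pow(0.5, 15)
	fmt.Printf("P(all 15 attempts fail) = %g (1 in %d)\n", pAllFail, 1<<15)
}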
13
vendor/k8s.io/kubernetes/test/e2e/apps/network_partition.go
generated
vendored
13
vendor/k8s.io/kubernetes/test/e2e/apps/network_partition.go
generated
vendored
@ -33,7 +33,7 @@ import (
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
nodepkg "k8s.io/kubernetes/pkg/controller/node"
nodepkg "k8s.io/kubernetes/pkg/controller/nodelifecycle"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
@ -104,25 +104,20 @@ func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) er

var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
f := framework.NewDefaultFramework("network-partition")
var systemPodsNo int32
var c clientset.Interface
var ns string
ignoreLabels := framework.ImagePullerLabels
var group string

BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
systemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
_, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
systemPodsNo = int32(len(systemPods))

// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
framework.SkipUnlessProviderIs("gke", "aws")
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
framework.Failf("Test does not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
} else {
group = framework.TestContext.CloudConfig.NodeInstanceGroup
}
})

@ -371,7 +366,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
petMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
ps := framework.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
_, err := c.AppsV1beta1().StatefulSets(ns).Create(ps)
_, err := c.AppsV1().StatefulSets(ns).Create(ps)
Expect(err).NotTo(HaveOccurred())

pst := framework.NewStatefulSetTester(c)
@ -387,7 +382,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {

It("should not reschedule stateful pods if there is a network partition [Slow] [Disruptive]", func() {
ps := framework.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
_, err := c.AppsV1beta1().StatefulSets(ns).Create(ps)
_, err := c.AppsV1().StatefulSets(ns).Create(ps)
Expect(err).NotTo(HaveOccurred())

pst := framework.NewStatefulSetTester(c)
90
vendor/k8s.io/kubernetes/test/e2e/apps/statefulset.go
generated
vendored
90
vendor/k8s.io/kubernetes/test/e2e/apps/statefulset.go
generated
vendored
@ -23,8 +23,7 @@ import (

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
apps "k8s.io/api/apps/v1beta1"
appsv1beta2 "k8s.io/api/apps/v1beta2"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klabels "k8s.io/apimachinery/pkg/labels"
@ -89,13 +88,15 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.DeleteAllStatefulSets(c, ns)
})

// This can't be Conformance yet because it depends on a default
// StorageClass and a dynamic provisioner.
It("should provide basic identity", func() {
By("Creating statefulset " + ssName + " in namespace " + ns)
*(ss.Spec.Replicas) = 3
sst := framework.NewStatefulSetTester(c)
sst.PauseNewPods(ss)

_, err := c.AppsV1beta1().StatefulSets(ns).Create(ss)
_, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())

By("Saturating stateful set " + ss.Name)
@ -126,6 +127,8 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectNoError(sst.ExecInStatefulPods(ss, cmd))
})

// This can't be Conformance yet because it depends on a default
// StorageClass and a dynamic provisioner.
It("should adopt matching orphans and release non-matching pods", func() {
By("Creating statefulset " + ssName + " in namespace " + ns)
*(ss.Spec.Replicas) = 1
@ -135,7 +138,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// Replace ss with the one returned from Create() so it has the UID.
// Save Kind since it won't be populated in the returned ss.
kind := ss.Kind
ss, err := c.AppsV1beta1().StatefulSets(ns).Create(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
ss.Kind = kind

@ -209,13 +212,15 @@ var _ = SIGDescribe("StatefulSet", func() {
)).To(Succeed(), "wait for pod %q to be readopted", pod.Name)
})

// This can't be Conformance yet because it depends on a default
// StorageClass and a dynamic provisioner.
It("should not deadlock when a pod's predecessor fails", func() {
By("Creating statefulset " + ssName + " in namespace " + ns)
*(ss.Spec.Replicas) = 2
sst := framework.NewStatefulSetTester(c)
sst.PauseNewPods(ss)

_, err := c.AppsV1beta1().StatefulSets(ns).Create(ss)
_, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())

sst.WaitForRunning(1, 0, ss)
@ -243,12 +248,20 @@ var _ = SIGDescribe("StatefulSet", func() {
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
})

It("should perform rolling updates and roll backs of template modifications", func() {
/*
Testname: StatefulSet-RollingUpdate
Description: StatefulSet MUST support the RollingUpdate strategy to automatically replace Pods
one at a time when the Pod template changes. The StatefulSet's status MUST indicate the
CurrentRevision and UpdateRevision. If the template is changed to match a prior revision,
StatefulSet MUST detect this as a rollback instead of creating a new revision.
This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func() {
By("Creating a new StatefulSet")
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
sst := framework.NewStatefulSetTester(c)
sst.SetHttpProbe(ss)
ss, err := c.AppsV1beta1().StatefulSets(ns).Create(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
@ -292,7 +305,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal updste revision %s on update completion",
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
@ -325,7 +338,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during roll bakc")
"Current revision should not equal update revision during roll back")
Expect(priorRevision).To(Equal(updateRevision),
"Prior revision should equal update revision during roll back")

@ -358,7 +371,14 @@ var _ = SIGDescribe("StatefulSet", func() {
}
})

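The canary test below exercises the Partition field of the RollingUpdate strategy: only Pods with an ordinal greater than or equal to Partition move to the updated template, while lower ordinals hold the current revision. A minimal sketch of the strategy object it mutates, using this file's apps (k8s.io/api/apps/v1) alias; ss is the StatefulSet under test and the partition value of 2 is an illustrative choice, not the test's exact sequence:

// With Partition = 2 on a 3-replica StatefulSet, only ordinal 2 rolls to
// the new template; ordinals 0 and 1 stay on the current revision until
// the partition is lowered.
partition := int32(2)
ss.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
	Type: apps.RollingUpdateStatefulSetStrategyType,
	RollingUpdate: &apps.RollingUpdateStatefulSetStrategy{
		Partition: &partition,
	},
}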
It("should perform canary updates and phased rolling updates of template modifications", func() {
|
||||
/*
|
||||
Testname: StatefulSet-RollingUpdatePartition
|
||||
Description: StatefulSet's RollingUpdate strategy MUST support the Partition parameter for
|
||||
canaries and phased rollouts. If a Pod is deleted while a rolling update is in progress,
|
||||
StatefulSet MUST restore the Pod without violating the Partition.
|
||||
This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
|
||||
*/
|
||||
framework.ConformanceIt("should perform canary updates and phased rolling updates of template modifications", func() {
|
||||
By("Creating a new StaefulSet")
|
||||
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
|
||||
sst := framework.NewStatefulSetTester(c)
|
||||
@ -373,7 +393,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
}()}
|
||||
}(),
|
||||
}
|
||||
ss, err := c.AppsV1beta1().StatefulSets(ns).Create(ss)
|
||||
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
|
||||
ss = sst.WaitForStatus(ss)
|
||||
@ -422,7 +442,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
currentRevision))
|
||||
}
|
||||
|
||||
By("By performing a canary update")
|
||||
By("Performing a canary update")
|
||||
ss.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
|
||||
Type: apps.RollingUpdateStatefulSetStrategyType,
|
||||
RollingUpdate: func() *apps.RollingUpdateStatefulSetStrategy {
|
||||
@ -567,6 +587,8 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
|
||||
})
|
||||
|
||||
// Do not mark this as Conformance.
|
||||
// The legacy OnDelete strategy only exists for backward compatibility with pre-v1 APIs.
|
||||
It("should implement legacy replacement when the update strategy is OnDelete", func() {
|
||||
By("Creating a new StatefulSet")
|
||||
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
|
||||
@ -575,7 +597,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
ss.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
|
||||
Type: apps.OnDeleteStatefulSetStrategyType,
|
||||
}
|
||||
ss, err := c.AppsV1beta1().StatefulSets(ns).Create(ss)
|
||||
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
|
||||
ss = sst.WaitForStatus(ss)
|
||||
@ -647,7 +669,14 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
}
|
||||
})
|
||||
|
||||
It("Scaling should happen in predictable order and halt if any stateful pod is unhealthy", func() {
|
||||
/*
|
||||
Testname: StatefulSet-Scaling
|
||||
Description: StatefulSet MUST create Pods in ascending order by ordinal index when scaling up,
|
||||
and delete Pods in descending order when scaling down. Scaling up or down MUST pause if any
|
||||
Pods belonging to the StatefulSet are unhealthy.
|
||||
This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
|
||||
*/
|
||||
framework.ConformanceIt("Scaling should happen in predictable order and halt if any stateful pod is unhealthy", func() {
|
||||
psLabels := klabels.Set(labels)
|
||||
By("Initializing watcher for selector " + psLabels.String())
|
||||
watcher, err := f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{
|
||||
@ -659,7 +688,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
|
||||
sst := framework.NewStatefulSetTester(c)
|
||||
sst.SetHttpProbe(ss)
|
||||
ss, err = c.AppsV1beta1().StatefulSets(ns).Create(ss)
|
||||
ss, err = c.AppsV1().StatefulSets(ns).Create(ss)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
|
||||
@ -723,7 +752,12 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Burst scaling should run to completion even with unhealthy pods", func() {
|
||||
/*
|
||||
Testname: StatefulSet-BurstScaling
|
||||
Description: StatefulSet MUST support the Parallel PodManagementPolicy for burst scaling.
|
||||
This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
|
||||
*/
|
||||
framework.ConformanceIt("Burst scaling should run to completion even with unhealthy pods", func() {
|
||||
psLabels := klabels.Set(labels)
|
||||
|
||||
By("Creating stateful set " + ssName + " in namespace " + ns)
|
||||
@ -731,7 +765,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
ss.Spec.PodManagementPolicy = apps.ParallelPodManagement
|
||||
sst := framework.NewStatefulSetTester(c)
|
||||
sst.SetHttpProbe(ss)
|
||||
ss, err := c.AppsV1beta1().StatefulSets(ns).Create(ss)
|
||||
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
|
||||
@ -761,7 +795,13 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
sst.WaitForStatusReplicas(ss, 0)
|
||||
})
|
||||
|
||||
It("Should recreate evicted statefulset", func() {
|
||||
/*
|
||||
Testname: StatefulSet-RecreateFailedPod
|
||||
Description: StatefulSet MUST delete and recreate Pods it owns that go into a Failed state,
|
||||
such as when they are rejected or evicted by a Node.
|
||||
This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
|
||||
*/
|
||||
framework.ConformanceIt("Should recreate evicted statefulset", func() {
|
||||
podName := "test-pod"
|
||||
statefulPodName := ssName + "-0"
|
||||
By("Looking for a node to schedule stateful set and pod")
|
||||
@ -793,7 +833,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
statefulPodContainer := &ss.Spec.Template.Spec.Containers[0]
|
||||
statefulPodContainer.Ports = append(statefulPodContainer.Ports, conflictingPort)
|
||||
ss.Spec.Template.Spec.NodeName = node.Name
|
||||
_, err = f.ClientSet.AppsV1beta1().StatefulSets(f.Namespace.Name).Create(ss)
|
||||
_, err = f.ClientSet.AppsV1().StatefulSets(f.Namespace.Name).Create(ss)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name)
|
||||
@ -844,7 +884,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
return nil
|
||||
}, framework.StatefulPodTimeout, 2*time.Second).Should(BeNil())
|
||||
})
|
||||
|
||||
/* Comment it for now until scale sub-resource is finalized in ref:pull/53679 for scale-sub resource specific comment.
|
||||
It("should have a working scale subresource", func() {
|
||||
By("Creating statefulset " + ssName + " in namespace " + ns)
|
||||
ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels)
|
||||
@ -858,6 +898,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
By("getting scale subresource")
|
||||
scale := framework.NewStatefulSetScale(ss)
|
||||
scaleResult := &appsv1beta2.Scale{}
|
||||
|
||||
err = c.AppsV1beta2().RESTClient().Get().AbsPath("/apis/apps/v1beta2").Namespace(ns).Resource("statefulsets").Name(ssName).SubResource("scale").Do().Into(scale)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to get scale subresource: %v", err)
|
||||
@ -881,6 +922,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
}
|
||||
Expect(*(ss.Spec.Replicas)).To(Equal(int32(2)))
|
||||
})
|
||||
*/
|
||||
})
|
||||
|
||||
framework.KubeDescribe("Deploy clustered applications [Feature:StatefulSet] [Slow]", func() {
|
||||
@ -900,21 +942,29 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
framework.DeleteAllStatefulSets(c, ns)
|
||||
})
|
||||
|
||||
// Do not mark this as Conformance.
|
||||
// StatefulSet Conformance should not be dependent on specific applications.
|
||||
It("should creating a working zookeeper cluster", func() {
|
||||
appTester.statefulPod = &zookeeperTester{tester: sst}
|
||||
appTester.run()
|
||||
})
|
||||
|
||||
// Do not mark this as Conformance.
|
||||
// StatefulSet Conformance should not be dependent on specific applications.
|
||||
It("should creating a working redis cluster", func() {
|
||||
appTester.statefulPod = &redisTester{tester: sst}
|
||||
appTester.run()
|
||||
})
|
||||
|
||||
// Do not mark this as Conformance.
|
||||
// StatefulSet Conformance should not be dependent on specific applications.
|
||||
It("should creating a working mysql cluster", func() {
|
||||
appTester.statefulPod = &mysqlGaleraTester{tester: sst}
|
||||
appTester.run()
|
||||
})
|
||||
|
||||
// Do not mark this as Conformance.
|
||||
// StatefulSet Conformance should not be dependent on specific applications.
|
||||
It("should creating a working CockroachDB cluster", func() {
|
||||
appTester.statefulPod = &cockroachDBTester{tester: sst}
|
||||
appTester.run()
|
||||
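One detail worth calling out from the burst-scaling test above: it relies on ss.Spec.PodManagementPolicy = apps.ParallelPodManagement, which lets the controller create and delete Pods without waiting for predecessors, unlike the default OrderedReady policy that the scaling-order test depends on. A short recap sketch of the relevant lines (values illustrative):

// Parallel pod management launches and terminates pods in bulk; OrderedReady
// (the default) gates each pod on its predecessor being Running and Ready.
ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 3, nil, nil, labels)
ss.Spec.PodManagementPolicy = apps.ParallelPodManagement
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)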
2
vendor/k8s.io/kubernetes/test/e2e/auth/BUILD
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/auth/BUILD
generated
vendored
@ -21,6 +21,7 @@ go_library(
"//pkg/security/apparmor:go_default_library",
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
"//pkg/security/podsecuritypolicy/util:go_default_library",
"//pkg/util/pointer:go_default_library",
"//plugin/pkg/admission/serviceaccount:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
@ -32,6 +33,7 @@ go_library(
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
14
vendor/k8s.io/kubernetes/test/e2e/auth/audit.go
generated
vendored
14
vendor/k8s.io/kubernetes/test/e2e/auth/audit.go
generated
vendored
@ -203,27 +203,27 @@ var _ = SIGDescribe("Advanced Audit", func() {
podLabels := map[string]string{"name": "audit-deployment-pod"}
d := framework.NewDeployment("audit-deployment", int32(1), podLabels, "redis", imageutils.GetE2EImage(imageutils.Redis), extensions.RecreateDeploymentStrategyType)

_, err := f.ClientSet.Extensions().Deployments(namespace).Create(d)
_, err := f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Create(d)
framework.ExpectNoError(err, "failed to create audit-deployment")

_, err = f.ClientSet.Extensions().Deployments(namespace).Get(d.Name, metav1.GetOptions{})
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Get(d.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get audit-deployment")

deploymentChan, err := f.ClientSet.Extensions().Deployments(namespace).Watch(watchOptions)
deploymentChan, err := f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Watch(watchOptions)
framework.ExpectNoError(err, "failed to create watch for deployments")
for range deploymentChan.ResultChan() {
}

_, err = f.ClientSet.Extensions().Deployments(namespace).Update(d)
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Update(d)
framework.ExpectNoError(err, "failed to update audit-deployment")

_, err = f.ClientSet.Extensions().Deployments(namespace).Patch(d.Name, types.JSONPatchType, patch)
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Patch(d.Name, types.JSONPatchType, patch)
framework.ExpectNoError(err, "failed to patch deployment")

_, err = f.ClientSet.Extensions().Deployments(namespace).List(metav1.ListOptions{})
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).List(metav1.ListOptions{})
framework.ExpectNoError(err, "failed to create list deployments")

err = f.ClientSet.Extensions().Deployments(namespace).Delete("audit-deployment", &metav1.DeleteOptions{})
err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Delete("audit-deployment", &metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete deployments")
},
[]auditEvent{
|
3
vendor/k8s.io/kubernetes/test/e2e/auth/metadata_concealment.go
generated
vendored
@ -24,6 +24,7 @@ import (

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
imageutil "k8s.io/kubernetes/test/utils/image"
)

var _ = SIGDescribe("Metadata Concealment", func() {
@ -45,7 +46,7 @@ var _ = SIGDescribe("Metadata Concealment", func() {
Containers: []v1.Container{
{
Name: "check-metadata-concealment",
Image: "gcr.io/google_containers/check-metadata-concealment:v0.0.2",
Image: imageutil.GetE2EImage(imageutil.CheckMetadataConcealment),
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
310
vendor/k8s.io/kubernetes/test/e2e/auth/pod_security_policy.go
generated
vendored
@ -20,8 +20,8 @@ import (
"fmt"

"k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
policy "k8s.io/api/policy/v1beta1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -32,6 +32,7 @@ import (
"k8s.io/kubernetes/pkg/security/apparmor"
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util"
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"

@ -39,64 +40,6 @@ import (
. "github.com/onsi/gomega"
)

var (
restrictivePSPTemplate = &extensionsv1beta1.PodSecurityPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "restrictive",
Annotations: map[string]string{
seccomp.AllowedProfilesAnnotationKey: "docker/default",
seccomp.DefaultProfileAnnotationKey: "docker/default",
apparmor.AllowedProfilesAnnotationKey: apparmor.ProfileRuntimeDefault,
apparmor.DefaultProfileAnnotationKey: apparmor.ProfileRuntimeDefault,
},
Labels: map[string]string{
"kubernetes.io/cluster-service": "true",
"addonmanager.kubernetes.io/mode": "Reconcile",
},
},
Spec: extensionsv1beta1.PodSecurityPolicySpec{
Privileged: false,
AllowPrivilegeEscalation: boolPtr(false),
RequiredDropCapabilities: []corev1.Capability{
"AUDIT_WRITE",
"CHOWN",
"DAC_OVERRIDE",
"FOWNER",
"FSETID",
"KILL",
"MKNOD",
"NET_RAW",
"SETGID",
"SETUID",
"SYS_CHROOT",
},
Volumes: []extensionsv1beta1.FSType{
extensionsv1beta1.ConfigMap,
extensionsv1beta1.EmptyDir,
extensionsv1beta1.PersistentVolumeClaim,
"projected",
extensionsv1beta1.Secret,
},
HostNetwork: false,
HostIPC: false,
HostPID: false,
RunAsUser: extensionsv1beta1.RunAsUserStrategyOptions{
Rule: extensionsv1beta1.RunAsUserStrategyMustRunAsNonRoot,
},
SELinux: extensionsv1beta1.SELinuxStrategyOptions{
Rule: extensionsv1beta1.SELinuxStrategyRunAsAny,
},
SupplementalGroups: extensionsv1beta1.SupplementalGroupsStrategyOptions{
Rule: extensionsv1beta1.SupplementalGroupsStrategyRunAsAny,
},
FSGroup: extensionsv1beta1.FSGroupStrategyOptions{
Rule: extensionsv1beta1.FSGroupStrategyRunAsAny,
},
ReadOnlyRootFilesystem: false,
},
}
)

var _ = SIGDescribe("PodSecurityPolicy", func() {
f := framework.NewDefaultFramework("podsecuritypolicy")
f.SkipPrivilegedPSPBinding = true
@ -131,41 +74,81 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {

It("should forbid pod creation when no PSP is available", func() {
By("Running a restricted pod")
_, err := c.Core().Pods(ns).Create(restrictedPod(f, "restricted"))
_, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "restricted"))
expectForbidden(err)
})

It("should enforce the restricted PodSecurityPolicy", func() {
// TODO: merge tests for extensions/policy API groups when PSP will be completely moved out of the extensions

It("should enforce the restricted extensions.PodSecurityPolicy", func() {
By("Creating & Binding a restricted policy for the test service account")
_, cleanup := createAndBindPSP(f, restrictivePSPTemplate)
_, cleanup := createAndBindPSP(f, restrictedPSP("restrictive"))
defer cleanup()

By("Running a restricted pod")
pod, err := c.Core().Pods(ns).Create(restrictedPod(f, "allowed"))
pod, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "allowed"))
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))

testPrivilegedPods(f, func(pod *v1.Pod) {
_, err := c.Core().Pods(ns).Create(pod)
_, err := c.CoreV1().Pods(ns).Create(pod)
expectForbidden(err)
})
})

It("should allow pods under the privileged PodSecurityPolicy", func() {
It("should enforce the restricted policy.PodSecurityPolicy", func() {
By("Creating & Binding a restricted policy for the test service account")
_, cleanup := createAndBindPSPInPolicy(f, restrictedPSPInPolicy("restrictive"))
defer cleanup()

By("Running a restricted pod")
pod, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "allowed"))
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))

testPrivilegedPods(f, func(pod *v1.Pod) {
_, err := c.CoreV1().Pods(ns).Create(pod)
expectForbidden(err)
})
})

It("should allow pods under the privileged extensions.PodSecurityPolicy", func() {
By("Creating & Binding a privileged policy for the test service account")
// Ensure that the permissive policy is used even in the presence of the restricted policy.
_, cleanup := createAndBindPSP(f, restrictivePSPTemplate)
_, cleanup := createAndBindPSP(f, restrictedPSP("restrictive"))
defer cleanup()
expectedPSP, cleanup := createAndBindPSP(f, framework.PrivilegedPSP("permissive"))
defer cleanup()

testPrivilegedPods(f, func(pod *v1.Pod) {
p, err := c.Core().Pods(ns).Create(pod)
p, err := c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))

// Verify expected PSP was used.
p, err = c.Core().Pods(ns).Get(p.Name, metav1.GetOptions{})
p, err = c.CoreV1().Pods(ns).Get(p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
validated, found := p.Annotations[psputil.ValidatedPSPAnnotation]
Expect(found).To(BeTrue(), "PSP annotation not found")
Expect(validated).To(Equal(expectedPSP.Name), "Unexpected validated PSP")
})
})

It("should allow pods under the privileged policy.PodSecurityPolicy", func() {
By("Creating & Binding a privileged policy for the test service account")
// Ensure that the permissive policy is used even in the presence of the restricted policy.
_, cleanup := createAndBindPSPInPolicy(f, restrictedPSPInPolicy("restrictive"))
defer cleanup()
expectedPSP, cleanup := createAndBindPSPInPolicy(f, privilegedPSPInPolicy("permissive"))
defer cleanup()

testPrivilegedPods(f, func(pod *v1.Pod) {
p, err := c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))

// Verify expected PSP was used.
p, err = c.CoreV1().Pods(ns).Get(p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
validated, found := p.Annotations[psputil.ValidatedPSPAnnotation]
Expect(found).To(BeTrue(), "PSP annotation not found")
@ -235,10 +218,10 @@ func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
tester(unconfined)
})

By("Running a CAP_SYS_ADMIN pod", func() {
By("Running a SYS_ADMIN pod", func() {
sysadmin := restrictedPod(f, "sysadmin")
sysadmin.Spec.Containers[0].SecurityContext.Capabilities = &v1.Capabilities{
Add: []v1.Capability{"CAP_SYS_ADMIN"},
Add: []v1.Capability{"SYS_ADMIN"},
}
sysadmin.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation = nil
tester(sysadmin)
@ -285,6 +268,48 @@ func createAndBindPSP(f *framework.Framework, pspTemplate *extensionsv1beta1.Pod
}
}

// createAndBindPSPInPolicy creates a PSP in the policy API group (unlike createAndBindPSP()).
// TODO: merge these functions when PSP will be completely moved out of the extensions
func createAndBindPSPInPolicy(f *framework.Framework, pspTemplate *policy.PodSecurityPolicy) (psp *policy.PodSecurityPolicy, cleanup func()) {
// Create the PodSecurityPolicy object.
psp = pspTemplate.DeepCopy()
// Add the namespace to the name to ensure uniqueness and tie it to the namespace.
ns := f.Namespace.Name
name := fmt.Sprintf("%s-%s", ns, psp.Name)
psp.Name = name
psp, err := f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Create(psp)
framework.ExpectNoError(err, "Failed to create PSP")

// Create the Role to bind it to the namespace.
_, err = f.ClientSet.RbacV1beta1().Roles(ns).Create(&rbacv1beta1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Rules: []rbacv1beta1.PolicyRule{{
APIGroups: []string{"policy"},
Resources: []string{"podsecuritypolicies"},
ResourceNames: []string{name},
Verbs: []string{"use"},
}},
})
framework.ExpectNoError(err, "Failed to create PSP role")

// Bind the role to the namespace.
framework.BindRoleInNamespace(f.ClientSet.RbacV1beta1(), name, ns, rbacv1beta1.Subject{
Kind: rbacv1beta1.ServiceAccountKind,
Namespace: ns,
Name: "default",
})
framework.ExpectNoError(framework.WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(),
serviceaccount.MakeUsername(ns, "default"), ns, "use", name,
schema.GroupResource{Group: "policy", Resource: "podsecuritypolicies"}, true))

return psp, func() {
// Cleanup non-namespaced PSP object.
f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Delete(name, &metav1.DeleteOptions{})
}
}

func restrictedPod(f *framework.Framework, name string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -307,6 +332,151 @@ func restrictedPod(f *framework.Framework, name string) *v1.Pod {
}
}

// privilegedPSPInPolicy creates a PodSecurityPolicy (in the "policy" API Group) that allows everything.
// TODO: replace by PrivilegedPSP when PSP will be completely moved out of the extensions
func privilegedPSPInPolicy(name string) *policy.PodSecurityPolicy {
return &policy.PodSecurityPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{seccomp.AllowedProfilesAnnotationKey: seccomp.AllowAny},
},
Spec: policy.PodSecurityPolicySpec{
Privileged: true,
AllowPrivilegeEscalation: utilpointer.BoolPtr(true),
AllowedCapabilities: []v1.Capability{"*"},
Volumes: []policy.FSType{policy.All},
HostNetwork: true,
HostPorts: []policy.HostPortRange{{Min: 0, Max: 65535}},
HostIPC: true,
HostPID: true,
RunAsUser: policy.RunAsUserStrategyOptions{
Rule: policy.RunAsUserStrategyRunAsAny,
},
SELinux: policy.SELinuxStrategyOptions{
Rule: policy.SELinuxStrategyRunAsAny,
},
SupplementalGroups: policy.SupplementalGroupsStrategyOptions{
Rule: policy.SupplementalGroupsStrategyRunAsAny,
},
FSGroup: policy.FSGroupStrategyOptions{
Rule: policy.FSGroupStrategyRunAsAny,
},
ReadOnlyRootFilesystem: false,
},
}
}

// restrictedPSPInPolicy creates a PodSecurityPolicy (in the "policy" API Group) that is most strict.
// TODO: replace by restrictedPSP when PSP will be completely moved out of the extensions
func restrictedPSPInPolicy(name string) *policy.PodSecurityPolicy {
return &policy.PodSecurityPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
seccomp.AllowedProfilesAnnotationKey: "docker/default",
seccomp.DefaultProfileAnnotationKey: "docker/default",
apparmor.AllowedProfilesAnnotationKey: apparmor.ProfileRuntimeDefault,
apparmor.DefaultProfileAnnotationKey: apparmor.ProfileRuntimeDefault,
},
},
Spec: policy.PodSecurityPolicySpec{
Privileged: false,
AllowPrivilegeEscalation: utilpointer.BoolPtr(false),
RequiredDropCapabilities: []v1.Capability{
"AUDIT_WRITE",
"CHOWN",
"DAC_OVERRIDE",
"FOWNER",
"FSETID",
"KILL",
"MKNOD",
"NET_RAW",
"SETGID",
"SETUID",
"SYS_CHROOT",
},
Volumes: []policy.FSType{
policy.ConfigMap,
policy.EmptyDir,
policy.PersistentVolumeClaim,
"projected",
policy.Secret,
},
HostNetwork: false,
HostIPC: false,
HostPID: false,
RunAsUser: policy.RunAsUserStrategyOptions{
Rule: policy.RunAsUserStrategyMustRunAsNonRoot,
},
SELinux: policy.SELinuxStrategyOptions{
Rule: policy.SELinuxStrategyRunAsAny,
},
SupplementalGroups: policy.SupplementalGroupsStrategyOptions{
Rule: policy.SupplementalGroupsStrategyRunAsAny,
},
FSGroup: policy.FSGroupStrategyOptions{
Rule: policy.FSGroupStrategyRunAsAny,
},
ReadOnlyRootFilesystem: false,
},
}
}

// restrictedPSP creates a PodSecurityPolicy that is most strict.
func restrictedPSP(name string) *extensionsv1beta1.PodSecurityPolicy {
return &extensionsv1beta1.PodSecurityPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
seccomp.AllowedProfilesAnnotationKey: "docker/default",
seccomp.DefaultProfileAnnotationKey: "docker/default",
apparmor.AllowedProfilesAnnotationKey: apparmor.ProfileRuntimeDefault,
apparmor.DefaultProfileAnnotationKey: apparmor.ProfileRuntimeDefault,
},
},
Spec: extensionsv1beta1.PodSecurityPolicySpec{
Privileged: false,
AllowPrivilegeEscalation: utilpointer.BoolPtr(false),
RequiredDropCapabilities: []v1.Capability{
"AUDIT_WRITE",
"CHOWN",
"DAC_OVERRIDE",
"FOWNER",
"FSETID",
"KILL",
"MKNOD",
"NET_RAW",
"SETGID",
"SETUID",
"SYS_CHROOT",
},
Volumes: []extensionsv1beta1.FSType{
extensionsv1beta1.ConfigMap,
extensionsv1beta1.EmptyDir,
extensionsv1beta1.PersistentVolumeClaim,
"projected",
extensionsv1beta1.Secret,
},
HostNetwork: false,
HostIPC: false,
HostPID: false,
RunAsUser: extensionsv1beta1.RunAsUserStrategyOptions{
Rule: extensionsv1beta1.RunAsUserStrategyMustRunAsNonRoot,
},
SELinux: extensionsv1beta1.SELinuxStrategyOptions{
Rule: extensionsv1beta1.SELinuxStrategyRunAsAny,
},
SupplementalGroups: extensionsv1beta1.SupplementalGroupsStrategyOptions{
Rule: extensionsv1beta1.SupplementalGroupsStrategyRunAsAny,
},
FSGroup: extensionsv1beta1.FSGroupStrategyOptions{
Rule: extensionsv1beta1.FSGroupStrategyRunAsAny,
},
ReadOnlyRootFilesystem: false,
},
}
}

func boolPtr(b bool) *bool {
return &b
}
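While PSP straddles the extensions and policy API groups, each helper above exists twice and is exercised symmetrically. A sketch of the intended call pattern inside a test body, using only names defined in this diff (f is the test framework instance):

// extensions/v1beta1 variant:
_, cleanupExt := createAndBindPSP(f, restrictedPSP("restrictive"))
defer cleanupExt()

// policy/v1beta1 variant, bound via the "policy" API group RBAC rule:
_, cleanupPolicy := createAndBindPSPInPolicy(f, restrictedPSPInPolicy("restrictive"))
defer cleanupPolicy()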
1
vendor/k8s.io/kubernetes/test/e2e/autoscaling/BUILD
generated
vendored
@ -31,6 +31,7 @@ go_library(
"//vendor/google.golang.org/api/monitoring/v3:go_default_library",
"//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
10
vendor/k8s.io/kubernetes/test/e2e/autoscaling/OWNERS
generated
vendored
@ -1,14 +1,8 @@
reviewers:
- aleksandra-malinowska
- bskiba
- jszczepkowski
- MaciekPytel
- mwielgus
- sig-autoscaling-maintainers
- wasylkowski
approvers:
- aleksandra-malinowska
- bskiba
- jszczepkowski
- MaciekPytel
- mwielgus
- sig-autoscaling-maintainers
- wasylkowski
148
vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_size_autoscaling.go
generated
vendored
@ -54,6 +54,7 @@ import (
const (
defaultTimeout = 3 * time.Minute
resizeTimeout = 5 * time.Minute
manualResizeTimeout = 6 * time.Minute
scaleUpTimeout = 5 * time.Minute
scaleUpTriggerTimeout = 2 * time.Minute
scaleDownTimeout = 20 * time.Minute
@ -245,36 +246,43 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() {
framework.SkipUnlessProviderIs("gke")

By("Creating new node-pool with one n1-standard-4 machine")
By("Creating new node-pool with n1-standard-4 machines")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+1, resizeTimeout))
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")

By("Get memory available on new node, so we can account for it when creating RC")
By("Getting memory available on new nodes, so we can account for it when creating RC")
nodes := getPoolNodes(f, extraPoolName)
Expect(len(nodes)).Should(Equal(1))
extraMem := nodes[0].Status.Capacity[v1.ResourceMemory]
extraMemMb := int((&extraMem).Value() / 1024 / 1024)
Expect(len(nodes)).Should(Equal(extraNodes))
extraMemMb := 0
for _, node := range nodes {
mem := node.Status.Capacity[v1.ResourceMemory]
extraMemMb += int((&mem).Value() / 1024 / 1024)
}

ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb+extraMemMb, false, defaultTimeout)
By("Reserving 0.1x more memory than the cluster holds to trigger scale up")
totalMemoryReservation := int(1.1 * float64(nodeCount*memAllocatableMb+extraMemMb))
ReserveMemory(f, "memory-reservation", 100, totalMemoryReservation, false, defaultTimeout)
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")

// Verify, that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
func(size int) bool { return size >= nodeCount+extraNodes+1 }, scaleUpTimeout))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
})

It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func() {
framework.SkipUnlessProviderIs("gke")

By("Creating new node-pool with one n1-standard-4 machine")
By("Creating new node-pool with n1-standard-4 machines")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+1, resizeTimeout))
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
framework.ExpectNoError(disableAutoscaler(extraPoolName, 1, 2))
})
@ -415,6 +423,15 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
}

if minSize == 0 {
newSizes := make(map[string]int)
for mig, size := range originalSizes {
newSizes[mig] = size
}
newSizes[minMig] = 1
setMigSizes(newSizes)
}

removeLabels := func(nodesToClean sets.String) {
By("Removing labels from nodes")
for node := range nodesToClean {
@ -436,7 +453,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {

By("Waiting for new node to appear and annotating it")
framework.WaitForGroupSize(minMig, int32(minSize+1))
// Verify, that cluster size is increased
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))

@ -495,23 +512,26 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
framework.SkipUnlessProviderIs("gke")

By("Creating new node-pool with one n1-standard-4 machine")
By("Creating new node-pool with n1-standard-4 machines")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+1, resizeTimeout))
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
defer disableAutoscaler(extraPoolName, 1, 2)

By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool")
ReserveMemory(f, "memory-reservation", 2, int(2.5*float64(memAllocatableMb)), false, defaultTimeout)
extraPods := extraNodes + 1
totalMemoryReservation := int(float64(extraPods) * 1.5 * float64(memAllocatableMb))
By(fmt.Sprintf("Creating rc with %v pods too big to fit default-pool but fitting extra-pool", extraPods))
ReserveMemory(f, "memory-reservation", extraPods, totalMemoryReservation, false, defaultTimeout)
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")

// Apparently GKE master is restarted couple minutes after the node pool is added
// reseting all the timers in scale down code. Adding 5 extra minutes to workaround
// this issue.
// TODO: Remove the extra time when GKE restart is fixed.
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+2, scaleUpTimeout+5*time.Minute))
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes+1, scaleUpTimeout+5*time.Minute))
})

simpleScaleDownTest := func(unready int) {
@ -528,7 +548,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
setMigSizes(newSizes)
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
func(size int) bool { return size >= increasedSize }, scaleUpTimeout, unready))
func(size int) bool { return size >= increasedSize }, manualResizeTimeout, unready))

By("Some node should be removed")
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
@ -551,9 +571,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-1", 3)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)

framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= increasedSize+3 }, scaleUpTimeout))
func(size int) bool { return size >= increasedSize+extraNodes }, scaleUpTimeout))

By("Some node should be removed")
// Apparently GKE master is restarted couple minutes after the node pool is added
@ -561,7 +582,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
// this issue.
// TODO: Remove the extra time when GKE restart is fixed.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < increasedSize+3 }, scaleDownTimeout+10*time.Minute))
func(size int) bool { return size < increasedSize+extraNodes }, scaleDownTimeout+10*time.Minute))
})

It("should be able to scale down when rescheduling a pod is required and pdb allows for it[Feature:ClusterSizeAutoscalingScaleDown]", func() {
@ -662,23 +683,26 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
// verify the targeted node pool/MIG is of size 0
gkeScaleToZero := func() {
// GKE-specific setup
By("Add a new node pool with 1 node and min size 0")
By("Add a new node pool with size 1 and min size 0")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+1, resizeTimeout))
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 0, 1))
defer disableAutoscaler(extraPoolName, 0, 1)

ngNodes := getPoolNodes(f, extraPoolName)
Expect(len(ngNodes) == 1).To(BeTrue())
node := ngNodes[0]
By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
Expect(len(ngNodes)).To(Equal(extraNodes))
for _, node := range ngNodes {
By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
}

// this part is identical
drainNode(f, node)
for _, node := range ngNodes {
drainNode(f, node)
}
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < nodeCount+1 }, scaleDownTimeout))
func(size int) bool { return size <= nodeCount }, scaleDownTimeout))

// GKE-specific check
newSize := getPoolSize(f, extraPoolName)
@ -964,10 +988,10 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str
MinAvailable: &minAvailable,
},
}
_, err = f.ClientSet.Policy().PodDisruptionBudgets(namespace).Create(pdb)
_, err = f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(pdb)

defer func() {
f.ClientSet.Policy().PodDisruptionBudgets(namespace).Delete(pdb.Name, &metav1.DeleteOptions{})
f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Delete(pdb.Name, &metav1.DeleteOptions{})
}()

framework.ExpectNoError(err)
@ -1041,7 +1065,7 @@ func getClusterLocation() string {
}
}

func getGcloudCommand(commandTrack string, args []string) []string {
func getGcloudCommandFromTrack(commandTrack string, args []string) []string {
command := []string{"gcloud"}
if commandTrack == "beta" || commandTrack == "alpha" {
command = append(command, commandTrack)
@ -1052,6 +1076,14 @@ func getGcloudCommand(commandTrack string, args []string) []string {
return command
}

func getGcloudCommand(args []string) []string {
track := ""
if isRegionalCluster() {
track = "beta"
}
return getGcloudCommandFromTrack(track, args)
}

func isRegionalCluster() bool {
// TODO(bskiba): Use an appropriate indicator that the cluster is regional.
return framework.TestContext.CloudConfig.MultiZone
@ -1065,11 +1097,7 @@ func enableAutoscaler(nodePool string, minCount, maxCount int) error {
"--min-nodes=" + strconv.Itoa(minCount),
"--max-nodes=" + strconv.Itoa(maxCount),
"--node-pool=" + nodePool}
track := ""
if isRegionalCluster() {
track = "beta"
}
output, err := execCmd(getGcloudCommand(track, args)...).CombinedOutput()
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()

if err != nil {
glog.Errorf("Failed config update result: %s", output)
@ -1093,11 +1121,7 @@ func disableAutoscaler(nodePool string, minCount, maxCount int) error {
args := []string{"container", "clusters", "update", framework.TestContext.CloudConfig.Cluster,
"--no-enable-autoscaling",
"--node-pool=" + nodePool}
track := ""
if isRegionalCluster() {
track = "beta"
}
output, err := execCmd(getGcloudCommand(track, args)...).CombinedOutput()
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()

if err != nil {
glog.Errorf("Failed config update result: %s", output)
@ -1209,10 +1233,8 @@ func disableAutoprovisioning() error {

func getNAPNodePools() ([]string, error) {
if framework.ProviderIs("gke") {
output, err := exec.Command("gcloud", "container", "node-pools", "list",
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone,
"--cluster="+framework.TestContext.CloudConfig.Cluster).CombinedOutput()
args := []string{"container", "node-pools", "list", "--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
glog.Errorf("Failed to get instance groups: %v", string(output))
return nil, err
@ -1273,16 +1295,16 @@ func addNodePool(name string, machineType string, numNodes int) {
"--machine-type=" + machineType,
"--num-nodes=" + strconv.Itoa(numNodes),
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand("alpha", args)...).CombinedOutput()
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
glog.Infof("Creating node-pool %s: %s", name, output)
framework.ExpectNoError(err)
framework.ExpectNoError(err, string(output))
}

func deleteNodePool(name string) {
glog.Infof("Deleting node pool %s", name)
args := []string{"container", "node-pools", "delete", name, "--quiet",
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand("alpha", args)...).CombinedOutput()
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
glog.Infof("Error: %v", err)
}
@ -1300,6 +1322,32 @@ func getPoolNodes(f *framework.Framework, poolName string) []*v1.Node {
return nodes
}

// getPoolInitialSize returns the initial size of the node pool taking into
// account that it may span multiple zones. In that case, node pool consists of
// multiple migs all containing initialNodeCount nodes.
func getPoolInitialSize(poolName string) int {
// get initial node count
args := []string{"container", "node-pools", "describe", poolName, "--quiet",
"--cluster=" + framework.TestContext.CloudConfig.Cluster,
"--format=value(initialNodeCount)"}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
glog.Infof("Node-pool initial size: %s", output)
framework.ExpectNoError(err, string(output))
fields := strings.Fields(string(output))
Expect(len(fields)).Should(Equal(1))
size, err := strconv.ParseInt(fields[0], 10, 64)
framework.ExpectNoError(err)

// get number of node pools
args = []string{"container", "node-pools", "describe", poolName, "--quiet",
"--cluster=" + framework.TestContext.CloudConfig.Cluster,
"--format=value(instanceGroupUrls)"}
output, err = execCmd(getGcloudCommand(args)...).CombinedOutput()
framework.ExpectNoError(err, string(output))
nodeGroupCount := len(strings.Split(string(output), ";"))
return int(size) * nodeGroupCount
}

func getPoolSize(f *framework.Framework, poolName string) int {
size := 0
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
@ -1769,7 +1817,7 @@ func manuallyIncreaseClusterSize(f *framework.Framework, originalSizes map[strin
return false
}

framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, checkClusterSize, scaleUpTimeout))
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, checkClusterSize, manualResizeTimeout))
return increasedSize
}

@ -1904,7 +1952,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
var finalErr error
for _, newPdbName := range newPdbs {
By(fmt.Sprintf("Delete PodDisruptionBudget %v", newPdbName))
err := f.ClientSet.Policy().PodDisruptionBudgets("kube-system").Delete(newPdbName, &metav1.DeleteOptions{})
err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Delete(newPdbName, &metav1.DeleteOptions{})
if err != nil {
// log error, but attempt to remove other pdbs
glog.Errorf("Failed to delete PodDisruptionBudget %v, err: %v", newPdbName, err)
@ -1942,7 +1990,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
MinAvailable: &minAvailable,
},
}
_, err := f.ClientSet.Policy().PodDisruptionBudgets("kube-system").Create(pdb)
_, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Create(pdb)
newPdbs = append(newPdbs, pdbName)

if err != nil {
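The new getPoolInitialSize accounts for regional clusters by multiplying initialNodeCount by the number of instance groups (one MIG per zone). A self-contained worked example with hypothetical gcloud output:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical values for a three-zone regional node pool created with
	// --num-nodes=1: gcloud reports initialNodeCount per zone and the
	// instance-group URLs joined by ";".
	initialNodeCount := 1
	instanceGroupUrls := "mig-zone-a;mig-zone-b;mig-zone-c"
	nodeGroupCount := len(strings.Split(instanceGroupUrls, ";"))
	fmt.Println(initialNodeCount * nodeGroupCount) // 3 -- the extraNodes value the tests now wait for
}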
177
vendor/k8s.io/kubernetes/test/e2e/autoscaling/custom_metrics_autoscaling.go
generated
vendored
@ -20,18 +20,19 @@ import (
"context"
"time"

"golang.org/x/oauth2/google"
clientset "k8s.io/client-go/kubernetes"

. "github.com/onsi/ginkgo"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"

gcm "google.golang.org/api/monitoring/v3"
as "k8s.io/api/autoscaling/v2beta1"
corev1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/instrumentation/monitoring"

. "github.com/onsi/ginkgo"
"golang.org/x/oauth2/google"
)

const (
@ -46,15 +47,67 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
})

f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
var kubeClient clientset.Interface

It("should autoscale with Custom Metrics from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
kubeClient = f.ClientSet
testHPA(f, kubeClient)
It("should scale down with Custom Metric of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
initialReplicas := 2
scaledReplicas := 1
// metric should cause scale down
metricValue := int64(100)
metricTarget := 2 * metricValue
deployment := monitoring.SimpleStackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue)
customMetricTest(f, f.ClientSet, simplePodsHPA(f.Namespace.ObjectMeta.Name, metricTarget), deployment, nil, initialReplicas, scaledReplicas)
})

It("should scale down with Custom Metric of type Object from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
initialReplicas := 2
scaledReplicas := 1
// metric should cause scale down
metricValue := int64(100)
metricTarget := 2 * metricValue
deployment := monitoring.SimpleStackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue)
pod := monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, metricValue)
customMetricTest(f, f.ClientSet, objectHPA(f.Namespace.ObjectMeta.Name, metricTarget), deployment, pod, initialReplicas, scaledReplicas)
})

It("should scale down with Custom Metric of type Pod from Stackdriver with Prometheus [Feature:CustomMetricsAutoscaling]", func() {
initialReplicas := 2
scaledReplicas := 1
// metric should cause scale down
metricValue := int64(100)
metricTarget := 2 * metricValue
deployment := monitoring.PrometheusExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue)
customMetricTest(f, f.ClientSet, simplePodsHPA(f.Namespace.ObjectMeta.Name, metricTarget), deployment, nil, initialReplicas, scaledReplicas)
})

It("should scale up with two metrics of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
initialReplicas := 1
scaledReplicas := 3
// metric 1 would cause a scale down, if not for metric 2
metric1Value := int64(100)
metric1Target := 2 * metric1Value
// metric2 should cause a scale up
metric2Value := int64(200)
metric2Target := int64(0.5 * float64(metric2Value))
containers := []monitoring.CustomMetricContainerSpec{
{
Name: "stackdriver-exporter-metric1",
MetricName: "metric1",
MetricValue: metric1Value,
},
{
Name: "stackdriver-exporter-metric2",
MetricName: "metric2",
MetricValue: metric2Value,
},
}
metricTargets := map[string]int64{"metric1": metric1Target, "metric2": metric2Target}
deployment := monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers)
customMetricTest(f, f.ClientSet, podsHPA(f.Namespace.ObjectMeta.Name, stackdriverExporterDeployment, metricTargets), deployment, nil, initialReplicas, scaledReplicas)
})
})

func testHPA(f *framework.Framework, kubeClient clientset.Interface) {
func customMetricTest(f *framework.Framework, kubeClient clientset.Interface, hpa *as.HorizontalPodAutoscaler,
deployment *extensions.Deployment, pod *corev1.Pod, initialReplicas, scaledReplicas int) {
projectId := framework.TestContext.CloudConfig.ProjectID

ctx := context.Background()
@ -92,80 +145,89 @@ func testHPA(f *framework.Framework, kubeClient clientset.Interface) {
defer monitoring.CleanupAdapter()

// Run application that exports the metric
err = createDeploymentsToScale(f, kubeClient)
err = createDeploymentToScale(f, kubeClient, deployment, pod)
if err != nil {
framework.Failf("Failed to create stackdriver-exporter pod: %v", err)
}
defer cleanupDeploymentsToScale(f, kubeClient)
defer cleanupDeploymentsToScale(f, kubeClient, deployment, pod)

// Autoscale the deployments
err = createPodsHPA(f, kubeClient)
// Wait for the deployment to run
waitForReplicas(deployment.ObjectMeta.Name, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, initialReplicas)

// Autoscale the deployment
_, err = kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(f.Namespace.ObjectMeta.Name).Create(hpa)
if err != nil {
framework.Failf("Failed to create 'Pods' HPA: %v", err)
}
err = createObjectHPA(f, kubeClient)
if err != nil {
framework.Failf("Failed to create 'Objects' HPA: %v", err)
framework.Failf("Failed to create HPA: %v", err)
}

waitForReplicas(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, 1)
waitForReplicas(dummyDeploymentName, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, 1)
waitForReplicas(deployment.ObjectMeta.Name, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, scaledReplicas)
}

func createDeploymentsToScale(f *framework.Framework, cs clientset.Interface) error {
_, err := cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.Name, 2, 100))
if err != nil {
return err
func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, deployment *extensions.Deployment, pod *corev1.Pod) error {
if deployment != nil {
_, err := cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Create(deployment)
if err != nil {
return err
}
}
_, err = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, 100))
if err != nil {
return err
if pod != nil {
_, err := cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(pod)
if err != nil {
return err
}
}
_, err = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.Name, 2, 100))
return err
return nil
}

func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface) {
_ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterDeployment, &metav1.DeleteOptions{})
_ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterPod, &metav1.DeleteOptions{})
_ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(dummyDeploymentName, &metav1.DeleteOptions{})
func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface, deployment *extensions.Deployment, pod *corev1.Pod) {
if deployment != nil {
_ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(deployment.ObjectMeta.Name, &metav1.DeleteOptions{})
}
if pod != nil {
_ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(pod.ObjectMeta.Name, &metav1.DeleteOptions{})
}
}

func createPodsHPA(f *framework.Framework, cs clientset.Interface) error {
func simplePodsHPA(namespace string, metricTarget int64) *as.HorizontalPodAutoscaler {
return podsHPA(namespace, stackdriverExporterDeployment, map[string]int64{monitoring.CustomMetricName: metricTarget})
}

func podsHPA(namespace string, deploymentName string, metricTargets map[string]int64) *as.HorizontalPodAutoscaler {
var minReplicas int32 = 1
_, err := cs.AutoscalingV2beta1().HorizontalPodAutoscalers(f.Namespace.ObjectMeta.Name).Create(&as.HorizontalPodAutoscaler{
metrics := []as.MetricSpec{}
for metric, target := range metricTargets {
metrics = append(metrics, as.MetricSpec{
Type: as.PodsMetricSourceType,
Pods: &as.PodsMetricSource{
MetricName: metric,
TargetAverageValue: *resource.NewQuantity(target, resource.DecimalSI),
},
})
}
return &as.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "custom-metrics-pods-hpa",
Namespace: f.Namespace.ObjectMeta.Name,
Namespace: namespace,
},
Spec: as.HorizontalPodAutoscalerSpec{
Metrics: []as.MetricSpec{
{
Type: as.PodsMetricSourceType,
Pods: &as.PodsMetricSource{
MetricName: monitoring.CustomMetricName,
TargetAverageValue: *resource.NewQuantity(200, resource.DecimalSI),
},
},
},
Metrics: metrics,
MaxReplicas: 3,
MinReplicas: &minReplicas,
ScaleTargetRef: as.CrossVersionObjectReference{
APIVersion: "extensions/v1beta1",
Kind: "Deployment",
Name: stackdriverExporterDeployment,
Name: deploymentName,
},
},
})
return err
}
}

func createObjectHPA(f *framework.Framework, cs clientset.Interface) error {
func objectHPA(namespace string, metricTarget int64) *as.HorizontalPodAutoscaler {
var minReplicas int32 = 1
_, err := cs.AutoscalingV2beta1().HorizontalPodAutoscalers(f.Namespace.ObjectMeta.Name).Create(&as.HorizontalPodAutoscaler{
return &as.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "custom-metrics-objects-hpa",
Namespace: f.Namespace.ObjectMeta.Name,
Namespace: namespace,
},
Spec: as.HorizontalPodAutoscalerSpec{
Metrics: []as.MetricSpec{
@ -177,7 +239,7 @@ func createObjectHPA(f *framework.Framework, cs clientset.Interface) error {
Kind: "Pod",
Name: stackdriverExporterPod,
},
TargetValue: *resource.NewQuantity(200, resource.DecimalSI),
TargetValue: *resource.NewQuantity(metricTarget, resource.DecimalSI),
},
},
},
@ -189,14 +251,13 @@ func createObjectHPA(f *framework.Framework, cs clientset.Interface) error {
Name: dummyDeploymentName,
},
},
})
return err
}
}

func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, timeout time.Duration, desiredReplicas int) {
interval := 20 * time.Second
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
deployment, err := cs.Extensions().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
deployment, err := cs.ExtensionsV1beta1().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get replication controller %s: %v", deployment, err)
}
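The refactor above turns the HPA helpers into pure constructors, so one test body can compose them freely. A sketch of the wiring (names from this diff; target values illustrative):

// Two Pod-type metrics, one HPA: scale-up wins because metric2 is over target.
targets := map[string]int64{"metric1": 200, "metric2": 100}
hpa := podsHPA(f.Namespace.ObjectMeta.Name, stackdriverExporterDeployment, targets)
customMetricTest(f, f.ClientSet, hpa, deployment, nil /* no separate exporter pod */, 1, 3)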
7
vendor/k8s.io/kubernetes/test/e2e/autoscaling/dns_autoscaling.go
generated
vendored
@ -104,7 +104,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
Expect(err).NotTo(HaveOccurred())
defer func() {
By("Restoring intial dns autoscaling parameters")
By("Restoring initial dns autoscaling parameters")
Expect(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams))).NotTo(HaveOccurred())

By("Wait for number of running and ready kube-dns pods recover")
@ -157,13 +157,14 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
})

It("kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func() {
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #55779 is fixed.
It("[DisabledForLargeClusters] kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func() {

By("Replace the dns autoscaling parameters with testing parameters")
err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
Expect(err).NotTo(HaveOccurred())
defer func() {
By("Restoring intial dns autoscaling parameters")
By("Restoring initial dns autoscaling parameters")
Expect(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams))).NotTo(HaveOccurred())
}()
By("Wait for kube-dns scaled to expected number")
3
vendor/k8s.io/kubernetes/test/e2e/autoscaling/horizontal_pod_autoscaling.go
generated
vendored
@ -66,8 +66,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu
})
})

// TODO: Get rid of [DisabledForLargeClusters] tag when issue #54637 is fixed.
SIGDescribe("[DisabledForLargeClusters] ReplicationController light", func() {
SIGDescribe("ReplicationController light", func() {
It("Should scale from 1 pod to 2 pods", func() {
scaleTest := &HPAScaleTest{
initPods: 1,
3
vendor/k8s.io/kubernetes/test/e2e/chaosmonkey/BUILD
generated
vendored
@ -16,8 +16,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["chaosmonkey_test.go"],
importpath = "k8s.io/kubernetes/test/e2e/chaosmonkey",
library = ":go_default_library",
embed = [":go_default_library"],
tags = ["e2e"],
)

3
vendor/k8s.io/kubernetes/test/e2e/common/apparmor.go
generated
vendored
@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/security/apparmor"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/gomega"
)
@ -185,7 +186,7 @@ func createAppArmorProfileLoader(f *framework.Framework) {
Spec: api.PodSpec{
Containers: []api.Container{{
Name: "apparmor-loader",
Image: "gcr.io/google_containers/apparmor-loader:0.1",
Image: imageutils.GetE2EImage(imageutils.AppArmorLoader),
Args: []string{"-poll", "10s", "/profiles"},
SecurityContext: &api.SecurityContext{
Privileged: &True,
91
vendor/k8s.io/kubernetes/test/e2e/common/configmap_volume.go
generated
vendored
@ -184,6 +184,97 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
})

It("binary data should be reflected in volume ", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))

name := "configmap-test-upd-" + string(uuid.NewUUID())
volumeName := "configmap-volume"
volumeMountPath := "/etc/configmap-volume"
containerName1 := "configmap-volume-data-test"
containerName2 := "configmap-volume-binary-test"

configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
Data: map[string]string{
"data-1": "value-1",
},
BinaryData: map[string][]byte{
"dump.bin": {0xde, 0xca, 0xfe, 0xba, 0xd0, 0xfe, 0xff},
},
}

By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}

pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
Containers: []v1.Container{
{
Name: containerName1,
Image: mountImage,
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
},
},
{
Name: containerName2,
Image: "busybox",
Command: []string{"hexdump", "-C", "/etc/configmap-volume/dump.bin"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
f.PodClient().CreateSync(pod)

pollLogs1 := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName1)
}
pollLogs2 := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName2)
}

By("Waiting for pod with text data")
Eventually(pollLogs1, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By("Waiting for pod with binary data")
Eventually(pollLogs2, podLogTimeout, framework.Poll).Should(ContainSubstring("de ca fe ba d0 fe ff"))
})

/*
Testname: configmap-CUD-test
Description: Make sure Create, Update, Delete operations are all working
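The expected substring in the binary-data assertion is just hexdump-style formatting of the seven BinaryData bytes; a self-contained check:

package main

import "fmt"

func main() {
	dump := []byte{0xde, 0xca, 0xfe, 0xba, 0xd0, 0xfe, 0xff}
	out := ""
	for i, b := range dump {
		if i > 0 {
			out += " "
		}
		out += fmt.Sprintf("%02x", b)
	}
	fmt.Println(out) // de ca fe ba d0 fe ff -- matches the ContainSubstring above
}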
2
vendor/k8s.io/kubernetes/test/e2e/common/container_probe.go
generated
vendored
@ -263,7 +263,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
},
InitialDelaySeconds: 15,
TimeoutSeconds: 5,
FailureThreshold: 1,
FailureThreshold: 5, // to accommodate nodes which are slow in bringing up containers.
},
},
},
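The probe change above pairs the old FailureThreshold of 1 with the new value of 5, so a single slow container start no longer fails the run. A hedged sketch of a liveness probe carrying these fields (the handler and values are illustrative; assumes the k8s.io/api/core/v1 and intstr packages):

probe := &v1.Probe{
	Handler:             v1.Handler{HTTPGet: &v1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)}}, // hypothetical handler
	InitialDelaySeconds: 15,
	TimeoutSeconds:      5,
	FailureThreshold:    5, // tolerate slow container bring-up before restarting
}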
2
vendor/k8s.io/kubernetes/test/e2e/common/docker_containers.go
generated
vendored
@ -62,7 +62,7 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
Container, ensure that it takes precedent to the docker image's default
command.
*/
framework.ConformanceIt("should be able to override the image's default commmand (docker entrypoint) ", func() {
framework.ConformanceIt("should be able to override the image's default command (docker entrypoint) ", func() {
pod := entrypointTestPod()
pod.Spec.Containers[0].Command = []string{"/ep-2"}

46
vendor/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go
generated
vendored
@ -46,7 +46,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
*/
framework.ConformanceIt("should provide podname only ", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname")
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")

f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
@ -55,31 +55,31 @@ var _ = Describe("[sig-storage] Downward API volume", func() {

/*
Testname: downwardapi-volume-set-default-mode
Description: Ensure that downward API can set default file premission
Description: Ensure that downward API can set default file permission
mode for DownwardAPIVolumeFiles if no mode is specified.
*/
framework.ConformanceIt("should set DefaultMode on files ", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
defaultMode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podname", nil, &defaultMode)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode)

f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podname\": -r--------",
"mode of file \"/etc/podinfo/podname\": -r--------",
})
})

/*
Testname: downwardapi-volume-set-mode
Description: Ensure that downward API can set file premission mode for
Description: Ensure that downward API can set file permission mode for
DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should set mode on item file ", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
mode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podname", &mode, nil)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)

f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podname\": -r--------",
"mode of file \"/etc/podinfo/podname\": -r--------",
})
})

@ -87,7 +87,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
podName := "metadata-volume-" + string(uuid.NewUUID())
uid := int64(1001)
gid := int64(1234)
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname")
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
pod.Spec.SecurityContext = &v1.PodSecurityContext{
RunAsUser: &uid,
FSGroup: &gid,
@ -102,13 +102,13 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
uid := int64(1001)
gid := int64(1234)
mode := int32(0440) /* setting fsGroup sets mode to at least 440 */
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podname", &mode, nil)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
pod.Spec.SecurityContext = &v1.PodSecurityContext{
RunAsUser: &uid,
FSGroup: &gid,
}
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podname\": -r--r-----",
"mode of file \"/etc/podinfo/podname\": -r--r-----",
})
})

@ -123,7 +123,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
labels["key2"] = "value2"

podName := "labelsupdate" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/labels")
pod := downwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/podinfo/labels")
containerName := "client-container"
By("Creating the pod")
podClient.CreateSync(pod)
@ -153,7 +153,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
annotations := map[string]string{}
annotations["builder"] = "bar"
podName := "annotationupdate" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForUpdateTest(podName, map[string]string{}, annotations, "/etc/annotations")
pod := downwardAPIVolumePodForUpdateTest(podName, map[string]string{}, annotations, "/etc/podinfo/annotations")

containerName := "client-container"
By("Creating the pod")
@ -185,7 +185,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
*/
framework.ConformanceIt("should provide container's cpu limit ", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/cpu_limit")
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit")

f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("2\n"),
@ -199,7 +199,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
*/
framework.ConformanceIt("should provide container's memory limit ", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/memory_limit")
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit")

f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("67108864\n"),
@ -213,7 +213,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
*/
framework.ConformanceIt("should provide container's cpu request ", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/cpu_request")
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request")

f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("1\n"),
@ -227,7 +227,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
*/
framework.ConformanceIt("should provide container's memory request ", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/memory_request")
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request")

f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("33554432\n"),
@ -242,7 +242,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
*/
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set ", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/cpu_limit")
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit")

f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
})
@ -255,7 +255,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
*/
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set ", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/memory_limit")
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit")

f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
})
@ -273,7 +273,7 @@ func downwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMod
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
MountPath: "/etc/podinfo",
},
},
},
@ -299,7 +299,7 @@ func downwardAPIVolumePodForSimpleTest(name string, filePath string) *v1.Pod {
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
MountPath: "/etc/podinfo",
ReadOnly: false,
},
},
@ -340,7 +340,7 @@ func downwardAPIVolumeBaseContainers(name, filePath string) []v1.Container {
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
MountPath: "/etc/podinfo",
ReadOnly: false,
},
},
@ -358,7 +358,7 @@ func downwardAPIVolumeDefaultBaseContainer(name, filePath string) []v1.Container
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
MountPath: "/etc/podinfo",
},
},
},
@ -377,7 +377,7 @@ func downwardAPIVolumePodForUpdateTest(name string, labels, annotations map[stri
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
MountPath: "/etc/podinfo",
ReadOnly: false,
},
},
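Every hunk in this file moves the downward API mount from /etc to /etc/podinfo, so the volume no longer shadows the rest of /etc. A rough sketch of the volume these helpers assemble, assuming the usual core/v1 types (values illustrative):

mode := int32(0400)
vol := v1.Volume{
	Name: "podinfo",
	VolumeSource: v1.VolumeSource{
		DownwardAPI: &v1.DownwardAPIVolumeSource{
			Items: []v1.DownwardAPIVolumeFile{{
				Path:     "podname", // surfaces at <mountPath>/podname
				FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"},
				Mode:     &mode, // per-file permission, which the mode tests assert
			}},
		},
	},
}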
2
vendor/k8s.io/kubernetes/test/e2e/common/pods.go
generated
vendored
@ -658,7 +658,7 @@ var _ = framework.KubeDescribe("Pods", func() {
time.Sleep(2 * kubelet.MaxContainerBackOff) // it takes slightly more than 2*x to get to a back-off of x

// wait for a delay == capped delay of MaxContainerBackOff
By("geting restart delay when capped")
By("getting restart delay when capped")
var (
delay1 time.Duration
err error
46
vendor/k8s.io/kubernetes/test/e2e/common/projected.go
generated
vendored
@ -471,7 +471,7 @@ var _ = Describe("[sig-storage] Projected", func() {
})

/*
Testname: projected-volume-configMaps-updated-succesfully
Testname: projected-volume-configMaps-updated-successfully
Description: Make sure that if a projected volume has configMaps,
that the values in these configMaps can be updated, deleted,
and created.
@ -560,7 +560,7 @@ var _ = Describe("[sig-storage] Projected", func() {
})

/*
Testname: projected-volume-optional-configMaps-updated-succesfully
Testname: projected-volume-optional-configMaps-updated-successfully
Description: Make sure that if a projected volume has optional
configMaps, that the values in these configMaps can be updated,
deleted, and created.
@ -866,7 +866,7 @@ var _ = Describe("[sig-storage] Projected", func() {
*/
framework.ConformanceIt("should provide podname only", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname")
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")

f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
@ -875,32 +875,32 @@ var _ = Describe("[sig-storage] Projected", func() {

/*
Testname: projected-downwardapi-volume-set-default-mode
Description: Ensure that downward API can set default file premission
Description: Ensure that downward API can set default file permission
mode for DownwardAPIVolumeFiles if no mode is specified in a projected
volume.
*/
framework.ConformanceIt("should set DefaultMode on files", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
defaultMode := int32(0400)
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podname", nil, &defaultMode)
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode)

f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podname\": -r--------",
"mode of file \"/etc/podinfo/podname\": -r--------",
})
})

/*
Testname: projected-downwardapi-volume-set-mode
Description: Ensure that downward API can set file premission mode for
Description: Ensure that downward API can set file permission mode for
DownwardAPIVolumeFiles in a projected volume.
*/
framework.ConformanceIt("should set mode on item file", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
mode := int32(0400)
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podname", &mode, nil)
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)

f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podname\": -r--------",
"mode of file \"/etc/podinfo/podname\": -r--------",
})
})

@ -908,7 +908,7 @@ var _ = Describe("[sig-storage] Projected", func() {
podName := "metadata-volume-" + string(uuid.NewUUID())
uid := int64(1001)
gid := int64(1234)
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname")
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
pod.Spec.SecurityContext = &v1.PodSecurityContext{
RunAsUser: &uid,
FSGroup: &gid,
@ -923,13 +923,13 @@ var _ = Describe("[sig-storage] Projected", func() {
uid := int64(1001)
gid := int64(1234)
mode := int32(0440) /* setting fsGroup sets mode to at least 440 */
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podname", &mode, nil)
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
pod.Spec.SecurityContext = &v1.PodSecurityContext{
RunAsUser: &uid,
FSGroup: &gid,
}
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podname\": -r--r-----",
"mode of file \"/etc/podinfo/podname\": -r--r-----",
})
})

@ -945,7 +945,7 @@ var _ = Describe("[sig-storage] Projected", func() {
labels["key2"] = "value2"

podName := "labelsupdate" + string(uuid.NewUUID())
pod := projectedDownwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/labels")
pod := projectedDownwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/podinfo/labels")
containerName := "client-container"
By("Creating the pod")
podClient.CreateSync(pod)
@ -976,7 +976,7 @@ var _ = Describe("[sig-storage] Projected", func() {
annotations := map[string]string{}
annotations["builder"] = "bar"
podName := "annotationupdate" + string(uuid.NewUUID())
pod := projectedDownwardAPIVolumePodForUpdateTest(podName, map[string]string{}, annotations, "/etc/annotations")
pod := projectedDownwardAPIVolumePodForUpdateTest(podName, map[string]string{}, annotations, "/etc/podinfo/annotations")

containerName := "client-container"
By("Creating the pod")
@ -1008,7 +1008,7 @@ var _ = Describe("[sig-storage] Projected", func() {
*/
framework.ConformanceIt("should provide container's cpu limit", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/cpu_limit")
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit")

f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("2\n"),
@ -1022,7 +1022,7 @@ var _ = Describe("[sig-storage] Projected", func() {
*/
framework.ConformanceIt("should provide container's memory limit", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/memory_limit")
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit")

f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("67108864\n"),
@ -1036,7 +1036,7 @@ var _ = Describe("[sig-storage] Projected", func() {
*/
framework.ConformanceIt("should provide container's cpu request", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/cpu_request")
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request")

f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("1\n"),
@ -1050,7 +1050,7 @@ var _ = Describe("[sig-storage] Projected", func() {
*/
framework.ConformanceIt("should provide container's memory request", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/memory_request")
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request")

f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("33554432\n"),
@ -1065,7 +1065,7 @@ var _ = Describe("[sig-storage] Projected", func() {
*/
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/cpu_limit")
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit")

f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
})
@ -1078,7 +1078,7 @@ var _ = Describe("[sig-storage] Projected", func() {
*/
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/memory_limit")
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit")

f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
})
@ -1495,7 +1495,7 @@ func projectedDownwardAPIVolumePodForModeTest(name, filePath string, itemMode, d
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
MountPath: "/etc/podinfo",
},
},
},
@ -1517,11 +1517,11 @@ func projectedDownwardAPIVolumePodForUpdateTest(name string, labels, annotations
{
Name: "client-container",
Image: mountImage,
Command: []string{"/mounttest", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
Command: []string{"/mounttest", "--break_on_expected_content=false", "--retry_time=1200", "--file_content_in_loop=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
MountPath: "/etc/podinfo",
ReadOnly: false,
},
},
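The projected-volume variants apply the same /etc/podinfo move. For orientation, a projected volume wraps the downward API source in a VolumeProjection; a minimal sketch under the same core/v1 assumptions:

vol := v1.Volume{
	Name: "podinfo",
	VolumeSource: v1.VolumeSource{
		Projected: &v1.ProjectedVolumeSource{
			Sources: []v1.VolumeProjection{{
				DownwardAPI: &v1.DownwardAPIProjection{
					Items: []v1.DownwardAPIVolumeFile{{
						Path:     "podname",
						FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"},
					}},
				},
			}},
		},
	},
}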
7
vendor/k8s.io/kubernetes/test/e2e/common/util.go
generated
vendored
@ -53,6 +53,7 @@ var CurrentSuite Suite
var CommonImageWhiteList = sets.NewString(
"busybox",
imageutils.GetE2EImage(imageutils.EntrypointTester),
imageutils.GetE2EImage(imageutils.IpcUtils),
imageutils.GetE2EImage(imageutils.Liveness),
imageutils.GetE2EImage(imageutils.Mounttest),
imageutils.GetE2EImage(imageutils.MounttestUser),
@ -61,9 +62,9 @@ var CommonImageWhiteList = sets.NewString(
imageutils.GetE2EImage(imageutils.ServeHostname),
imageutils.GetE2EImage(imageutils.TestWebserver),
imageutils.GetE2EImage(imageutils.Hostexec),
"gcr.io/google_containers/volume-nfs:0.8",
"gcr.io/google_containers/volume-gluster:0.2",
"gcr.io/google_containers/e2e-net-amd64:1.0",
imageutils.GetE2EImage(imageutils.VolumeNFSServer),
imageutils.GetE2EImage(imageutils.VolumeGlusterServer),
imageutils.GetE2EImage(imageutils.E2ENet),
)

func svcByName(name string, port int) *v1.Service {
16
vendor/k8s.io/kubernetes/test/e2e/e2e.go
generated
vendored
@ -32,10 +32,10 @@ import (

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtimeutils "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apiserver/pkg/util/logs"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/kubectl/util/logs"
"k8s.io/kubernetes/pkg/version"
commontest "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
@ -56,7 +56,7 @@ func setupProviderConfig() error {
glog.Info("The --provider flag is not set. Treating as a conformance test. Some tests may not be run.")

case "gce", "gke":
framework.Logf("Fetching cloud provider for %q\r\n", framework.TestContext.Provider)
framework.Logf("Fetching cloud provider for %q\r", framework.TestContext.Provider)
zone := framework.TestContext.CloudConfig.Zone
region := framework.TestContext.CloudConfig.Region

@ -72,7 +72,9 @@ func setupProviderConfig() error {
managedZones = []string{zone}
}

gceAlphaFeatureGate, err := gcecloud.NewAlphaFeatureGate([]string{gcecloud.AlphaFeatureNetworkEndpointGroup})
gceAlphaFeatureGate, err := gcecloud.NewAlphaFeatureGate([]string{
gcecloud.AlphaFeatureNetworkEndpointGroup,
})
if err != nil {
glog.Errorf("Encountered error for creating alpha feature gate: %v", err)
}
@ -288,20 +290,20 @@ func gatherTestSuiteMetrics() error {
}

metricsForE2E := (*framework.MetricsForE2E)(&received)
metricsJson := metricsForE2E.PrintJSON()
metricsJSON := metricsForE2E.PrintJSON()
if framework.TestContext.ReportDir != "" {
filePath := path.Join(framework.TestContext.ReportDir, "MetricsForE2ESuite_"+time.Now().Format(time.RFC3339)+".json")
if err := ioutil.WriteFile(filePath, []byte(metricsJson), 0644); err != nil {
if err := ioutil.WriteFile(filePath, []byte(metricsJSON), 0644); err != nil {
return fmt.Errorf("error writing to %q: %v", filePath, err)
}
} else {
framework.Logf("\n\nTest Suite Metrics:\n%s\n\n", metricsJson)
framework.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON)
}

return nil
}

// TestE2E checks configuration parameters (specified through flags) and then runs
// RunE2ETests checks configuration parameters (specified through flags) and then runs
// E2E tests using the Ginkgo runner.
// If a "report directory" is specified, one or more JUnit test reports will be
// generated in this directory, and cluster logs will also be saved.
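The import swap above replaces the kubectl copy of the logs helper with the apiserver one; to my knowledge both expose the same init/flush pair, typically wired up as in this sketch:

logs.InitLogs()
defer logs.FlushLogs() // flushes buffered glog output on exit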
1
vendor/k8s.io/kubernetes/test/e2e/e2e_test.go
generated
vendored
@ -31,7 +31,6 @@ import (
_ "k8s.io/kubernetes/test/e2e/kubectl"
_ "k8s.io/kubernetes/test/e2e/lifecycle"
_ "k8s.io/kubernetes/test/e2e/lifecycle/bootstrap"
_ "k8s.io/kubernetes/test/e2e/multicluster"
_ "k8s.io/kubernetes/test/e2e/network"
_ "k8s.io/kubernetes/test/e2e/node"
_ "k8s.io/kubernetes/test/e2e/scalability"
4
vendor/k8s.io/kubernetes/test/e2e/examples.go
generated
vendored
@ -521,7 +521,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
Expect(err).NotTo(HaveOccurred())

By("scaling rethinkdb")
framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, "rethinkdb-rc", 2, true)
framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "rethinkdb-rc", 2, true)
checkDbInstances()

By("starting admin")
@ -564,7 +564,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
Expect(err).NotTo(HaveOccurred())

By("scaling hazelcast")
framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, "hazelcast", 2, true)
framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "hazelcast", 2, true)
forEachPod("name", "hazelcast", func(pod v1.Pod) {
_, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
29
vendor/k8s.io/kubernetes/test/e2e/framework/BUILD
generated
vendored
@ -3,6 +3,7 @@ package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)

go_library(
@ -10,6 +11,7 @@ go_library(
srcs = [
"authorizer_util.go",
"cleanup.go",
"crd_util.go",
"deployment_util.go",
"exec_util.go",
"firewall_util.go",
@ -26,6 +28,7 @@ go_library(
"nodes_util.go",
"perf_util.go",
"pods.go",
"profile_gatherer.go",
"psp_util.go",
"pv_util.go",
"rc_util.go",
@ -47,7 +50,6 @@ go_library(
"//pkg/apis/apps:go_default_library",
"//pkg/apis/batch:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/conditions:go_default_library",
@ -57,9 +59,10 @@ go_library(
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/node:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubectl:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
"//pkg/kubelet/dockershim/metrics:go_default_library",
@ -69,15 +72,15 @@ go_library(
"//pkg/kubelet/util/format:go_default_library",
"//pkg/kubemark:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/schedulercache:go_default_library",
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
"//pkg/ssh:go_default_library",
"//pkg/util/file:go_default_library",
"//pkg/util/system:go_default_library",
"//pkg/util/taints:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
"//plugin/pkg/scheduler/schedulercache:go_default_library",
"//pkg/volume/util:go_default_library",
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/manifest:go_default_library",
@ -101,7 +104,7 @@ go_library(
"//vendor/golang.org/x/net/websocket:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/google.golang.org/api/googleapi:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/api/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
@ -109,7 +112,11 @@ go_library(
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
@ -131,14 +138,18 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/discovery/cached:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/scale:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//vendor/k8s.io/client-go/tools/remotecommand:go_default_library",
@ -165,3 +176,9 @@ filegroup(
],
tags = ["automanaged"],
)

go_test(
name = "go_default_test",
srcs = ["firewall_util_test.go"],
embed = [":go_default_library"],
)
126
vendor/k8s.io/kubernetes/test/e2e/framework/crd_util.go
generated
vendored
Normal file
@ -0,0 +1,126 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
"fmt"

apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/dynamic"
)

// CleanCrdFn declares the clean up function needed to remove the CRD
type CleanCrdFn func() error

// TestCrd holds all the pieces needed to test with the CRD
type TestCrd struct {
Name string
Kind string
ApiGroup string
ApiVersion string
ApiExtensionClient *crdclientset.Clientset
Crd *apiextensionsv1beta1.CustomResourceDefinition
DynamicClient dynamic.ResourceInterface
CleanUp CleanCrdFn
}

// CreateTestCRD creates a new CRD specifically for the calling test.
func CreateTestCRD(f *Framework) (*TestCrd, error) {
suffix := randomSuffix()
name := fmt.Sprintf("e2e-test-%s-%s-crd", f.BaseName, suffix)
kind := fmt.Sprintf("E2e-test-%s-%s-crd", f.BaseName, suffix)
group := fmt.Sprintf("%s-crd-test.k8s.io", f.BaseName)
apiVersion := "v1"
testcrd := &TestCrd{
Name: name,
Kind: kind,
ApiGroup: group,
ApiVersion: apiVersion,
}

// Creating a custom resource definition for use by assorted tests.
config, err := LoadConfig()
if err != nil {
Failf("failed to load config: %v", err)
return nil, err
}
apiExtensionClient, err := crdclientset.NewForConfig(config)
if err != nil {
Failf("failed to initialize apiExtensionClient: %v", err)
return nil, err
}
crd := newCRDForTest(testcrd)

//create CRD and waits for the resource to be recognized and available.
dynamicClient, err := testserver.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient, f.ClientPool)
if err != nil {
Failf("failed to create CustomResourceDefinition: %v", err)
return nil, err
}
resourceClient := dynamicClient.Resource(&metav1.APIResource{
Name: crd.Spec.Names.Plural,
Namespaced: true,
}, f.Namespace.Name)

testcrd.ApiExtensionClient = apiExtensionClient
testcrd.Crd = crd
testcrd.DynamicClient = resourceClient
testcrd.CleanUp = func() error {
err := testserver.DeleteCustomResourceDefinition(crd, apiExtensionClient)
if err != nil {
Failf("failed to delete CustomResourceDefinition(%s): %v", name, err)
}
return err
}
return testcrd, nil
}

// newCRDForTest generates a CRD definition for the test
func newCRDForTest(testcrd *TestCrd) *apiextensionsv1beta1.CustomResourceDefinition {
return &apiextensionsv1beta1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{Name: testcrd.GetMetaName()},
Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
Group: testcrd.ApiGroup,
Version: testcrd.ApiVersion,
Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
Plural: testcrd.GetPluralName(),
Singular: testcrd.Name,
Kind: testcrd.Kind,
ListKind: testcrd.GetListName(),
},
Scope: apiextensionsv1beta1.NamespaceScoped,
},
}
}

// GetMetaName returns the metaname for the CRD.
func (c *TestCrd) GetMetaName() string {
return c.Name + "s." + c.ApiGroup
}

// GetPluralName returns the plural form of the CRD name
func (c *TestCrd) GetPluralName() string {
return c.Name + "s"
}

// GetListName returns the name for the CRD list resources
func (c *TestCrd) GetListName() string {
return c.Name + "List"
}
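Tests consume this new helper roughly as sketched below (error handling abbreviated); the CleanUp closure is the piece that removes the CRD again:

testcrd, err := framework.CreateTestCRD(f)
if err != nil {
	return // or fail the test
}
defer testcrd.CleanUp()
// testcrd.DynamicClient can now create and list instances of the new resource.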
24
vendor/k8s.io/kubernetes/test/e2e/framework/deployment_util.go
generated
vendored
@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
@ -126,9 +127,9 @@ func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *extensio
return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, Logf, Poll, pollLongTimeout)
}

// WaitForDeploymentUpdatedReplicasLTE waits for given deployment to be observed by the controller and has at least a number of updatedReplicas
func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
return testutils.WaitForDeploymentUpdatedReplicasLTE(c, ns, deploymentName, minUpdatedReplicas, desiredGeneration, Poll, pollLongTimeout)
// WaitForDeploymentUpdatedReplicasGTE waits for given deployment to be observed by the controller and has at least a number of updatedReplicas
func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
return testutils.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, minUpdatedReplicas, desiredGeneration, Poll, pollLongTimeout)
}

// WaitForDeploymentRollbackCleared waits for given deployment either started rolling back or doesn't need to rollback.
@ -178,8 +179,8 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er
return err
}

func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, internalClientset, ns, name, size, wait, extensionsinternal.Kind("Deployment"))
func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, internalClientset, scalesGetter, ns, name, size, wait, extensionsinternal.Kind("Deployment"), extensionsinternal.Resource("deployments"))
}

func RunDeployment(config testutils.DeploymentConfig) error {
@ -213,9 +214,9 @@ func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName,
return testutils.CheckDeploymentRevisionAndImage(c, ns, deploymentName, revision, image)
}

func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*extensions.Deployment, error) {
deploymentSpec := MakeDeployment(replicas, podLabels, namespace, pvclaims, false, command)
deployment, err := client.Extensions().Deployments(namespace).Create(deploymentSpec)
func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*extensions.Deployment, error) {
deploymentSpec := MakeDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command)
deployment, err := client.ExtensionsV1beta1().Deployments(namespace).Create(deploymentSpec)
if err != nil {
return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err)
}
@ -229,7 +230,7 @@ func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[

// MakeDeployment creates a deployment definition based on the namespace. The deployment references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod
func MakeDeployment(replicas int32, podLabels map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *extensions.Deployment {
func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *extensions.Deployment {
if len(command) == 0 {
command = "while true; do sleep 1; done"
}
@ -273,6 +274,9 @@ func MakeDeployment(replicas int32, podLabels map[string]string, namespace strin
}
deploymentSpec.Spec.Template.Spec.Containers[0].VolumeMounts = volumeMounts
deploymentSpec.Spec.Template.Spec.Volumes = volumes
if nodeSelector != nil {
deploymentSpec.Spec.Template.Spec.NodeSelector = nodeSelector
}
return deploymentSpec
}

@ -286,7 +290,7 @@ func GetPodsForDeployment(client clientset.Interface, deployment *extensions.Dep
return nil, fmt.Errorf("expected a new replica set for deployment %q, found none", deployment.Name)
}
podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
return client.Core().Pods(namespace).List(options)
return client.CoreV1().Pods(namespace).List(options)
}
rsList := []*extensions.ReplicaSet{replicaSet}
podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
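With the extra nodeSelector parameter, a caller that previously passed labels and a namespace now also chooses where the pods may land; nil presumably keeps the old unpinned behavior. A hedged call-site sketch (selector value illustrative):

deployment, err := framework.CreateDeployment(
	client, 2, map[string]string{"app": "demo"}, // replicas and pod labels
	map[string]string{"kubernetes.io/hostname": "node-1"}, // new nodeSelector argument
	ns, pvclaims, "")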
113
vendor/k8s.io/kubernetes/test/e2e/framework/firewall_util.go
generated
vendored
@ -24,7 +24,6 @@ import (
"time"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
@ -100,7 +99,7 @@ func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service,
// GetInstanceTags gets tags from GCE instance with given name.
func GetInstanceTags(cloudConfig CloudConfig, instanceName string) *compute.Tags {
gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud)
res, err := gceCloud.GetComputeService().Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone,
res, err := gceCloud.ComputeServices().GA.Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone,
instanceName).Do()
if err != nil {
Failf("Failed to get instance tags for %v: %v", instanceName, err)
@ -113,7 +112,7 @@ func SetInstanceTags(cloudConfig CloudConfig, instanceName, zone string, tags []
gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud)
// Re-get instance everytime because we need the latest fingerprint for updating metadata
resTags := GetInstanceTags(cloudConfig, instanceName)
_, err := gceCloud.GetComputeService().Instances.SetTags(
_, err := gceCloud.ComputeServices().GA.Instances.SetTags(
cloudConfig.ProjectID, zone, instanceName,
&compute.Tags{Fingerprint: resTags.Fingerprint, Items: tags}).Do()
if err != nil {
@ -303,6 +302,83 @@ func PackProtocolsPortsFromFirewall(alloweds []*compute.FirewallAllowed) []strin
return protocolPorts
}

type portRange struct {
protocol string
min, max int
}

func toPortRange(s string) (pr portRange, err error) {
protoPorts := strings.Split(s, "/")
// Set protocol
pr.protocol = strings.ToUpper(protoPorts[0])

if len(protoPorts) != 2 {
return pr, fmt.Errorf("expected a single '/' in %q", s)
}

ports := strings.Split(protoPorts[1], "-")
switch len(ports) {
case 1:
v, err := strconv.Atoi(ports[0])
if err != nil {
return pr, err
}
pr.min, pr.max = v, v
case 2:
start, err := strconv.Atoi(ports[0])
if err != nil {
return pr, err
}
end, err := strconv.Atoi(ports[1])
if err != nil {
return pr, err
}
pr.min, pr.max = start, end
default:
return pr, fmt.Errorf("unexpected range value %q", protoPorts[1])
}

return pr, nil
}

// isPortsSubset asserts that the "requiredPorts" are covered by the "coverage" ports.
// requiredPorts - must be single-port, examples: 'tcp/50', 'udp/80'.
// coverage - single or port-range values, example: 'tcp/50', 'udp/80-1000'.
// Returns true if every requiredPort exists in the list of coverage rules.
func isPortsSubset(requiredPorts, coverage []string) error {
for _, reqPort := range requiredPorts {
rRange, err := toPortRange(reqPort)
if err != nil {
return err
}
if rRange.min != rRange.max {
return fmt.Errorf("requiring a range is not supported: %q", reqPort)
}

var covered bool
for _, c := range coverage {
cRange, err := toPortRange(c)
if err != nil {
return err
}

if rRange.protocol != cRange.protocol {
continue
}

if rRange.min >= cRange.min && rRange.min <= cRange.max {
covered = true
break
}
}

if !covered {
return fmt.Errorf("%q is not covered by %v", reqPort, coverage)
}
}
return nil
}

// SameStringArray verifies whether two string arrays have the same strings, return error if not.
// Order does not matter.
// When `include` is set to true, verifies whether result includes all elements from expected.
@ -335,10 +411,19 @@ func VerifyFirewallRule(res, exp *compute.Firewall, network string, portsSubset
if !strings.HasSuffix(res.Network, "/"+network) {
return fmt.Errorf("incorrect network: %v, expected ends with: %v", res.Network, "/"+network)
}
if err := SameStringArray(PackProtocolsPortsFromFirewall(res.Allowed),
PackProtocolsPortsFromFirewall(exp.Allowed), portsSubset); err != nil {
return fmt.Errorf("incorrect allowed protocols ports: %v", err)

actualPorts := PackProtocolsPortsFromFirewall(res.Allowed)
expPorts := PackProtocolsPortsFromFirewall(exp.Allowed)
if portsSubset {
if err := isPortsSubset(expPorts, actualPorts); err != nil {
return fmt.Errorf("incorrect allowed protocol ports: %v", err)
}
} else {
if err := SameStringArray(actualPorts, expPorts, false); err != nil {
return fmt.Errorf("incorrect allowed protocols ports: %v", err)
}
}

if err := SameStringArray(res.SourceRanges, exp.SourceRanges, false); err != nil {
return fmt.Errorf("incorrect source ranges %v, expected %v: %v", res.SourceRanges, exp.SourceRanges, err)
}
@ -371,19 +456,3 @@ func WaitForFirewallRule(gceCloud *gcecloud.GCECloud, fwName string, exist bool,
}
return fw, nil
}

func GetClusterID(c clientset.Interface) (string, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{})
if err != nil || cm == nil {
return "", fmt.Errorf("error getting cluster ID: %v", err)
}
clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster]
providerID, providerIDExists := cm.Data[gcecloud.UIDProvider]
if !clusterIDExists {
return "", fmt.Errorf("cluster ID not set")
}
if providerIDExists {
return providerID, nil
}
return clusterID, nil
}
54
vendor/k8s.io/kubernetes/test/e2e/framework/firewall_util_test.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import "testing"

func TestIsPortsSubset(t *testing.T) {
tc := map[string]struct {
required []string
coverage []string
expectErr bool
}{
"Single port coverage": {
required: []string{"tcp/50"},
coverage: []string{"tcp/50", "tcp/60", "tcp/70"},
},
"Port range coverage": {
required: []string{"tcp/50"},
coverage: []string{"tcp/20-30", "tcp/45-60"},
},
"Multiple Port range coverage": {
required: []string{"tcp/50", "tcp/29", "tcp/46"},
coverage: []string{"tcp/20-30", "tcp/45-60"},
},
"Not covered": {
required: []string{"tcp/50"},
coverage: []string{"udp/50", "tcp/49", "tcp/51-60"},
expectErr: true,
},
}

for name, c := range tc {
t.Run(name, func(t *testing.T) {
gotErr := isPortsSubset(c.required, c.coverage)
if c.expectErr != (gotErr != nil) {
t.Errorf("isPortsSubset(%v, %v) = %v, wanted err? %v", c.required, c.coverage, gotErr, c.expectErr)
}
})
}
}
30
vendor/k8s.io/kubernetes/test/e2e/framework/framework.go
generated
vendored
@ -28,14 +28,19 @@ import (

"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
cacheddiscovery "k8s.io/client-go/discovery/cached"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
scaleclient "k8s.io/client-go/scale"
"k8s.io/client-go/tools/clientcmd"
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/api/legacyscheme"
@ -67,13 +72,15 @@ type Framework struct {
AggregatorClient *aggregatorclient.Clientset
ClientPool dynamic.ClientPool

ScalesGetter scaleclient.ScalesGetter

SkipNamespaceCreation bool // Whether to skip creating a namespace
Namespace *v1.Namespace // Every test has at least one namespace unless creation is skipped
namespacesToDelete []*v1.Namespace // Some tests have more than one.
NamespaceDeletionTimeout time.Duration
SkipPrivilegedPSPBinding bool // Whether to skip creating a binding to the privileged PSP in the test namespace

gatherer *containerResourceGatherer
gatherer *ContainerResourceGatherer
// Constraints that passed to a check which is executed after data is gathered to
// see if 99% of results are within acceptable bounds. It has to be injected in the test,
// as expectations vary greatly. Constraints are grouped by the container names.
@ -161,6 +168,25 @@ func (f *Framework) BeforeEach() {
f.AggregatorClient, err = aggregatorclient.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
f.ClientPool = dynamic.NewClientPool(config, legacyscheme.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)

// create scales getter, set GroupVersion and NegotiatedSerializer to default values
// as they are required when creating a REST client.
if config.GroupVersion == nil {
config.GroupVersion = &schema.GroupVersion{}
}
if config.NegotiatedSerializer == nil {
config.NegotiatedSerializer = legacyscheme.Codecs
}
restClient, err := rest.RESTClientFor(config)
Expect(err).NotTo(HaveOccurred())
discoClient, err := discovery.NewDiscoveryClientForConfig(config)
Expect(err).NotTo(HaveOccurred())
cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient)
restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscoClient, meta.InterfacesForUnstructured)
restMapper.Reset()
resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)

if ProviderIs("kubemark") && TestContext.KubemarkExternalKubeConfig != "" && TestContext.CloudConfig.KubemarkController == nil {
externalConfig, err := clientcmd.BuildConfigFromFlags("", TestContext.KubemarkExternalKubeConfig)
externalConfig.QPS = f.Options.ClientQPS
@ -378,7 +404,7 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
f.namespacesToDelete = append(f.namespacesToDelete, ns)
}

if !f.SkipPrivilegedPSPBinding {
if err == nil && !f.SkipPrivilegedPSPBinding {
CreatePrivilegedPSPBinding(f, ns.Name)
}

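Once BeforeEach has wired up f.ScalesGetter, tests can read and write the scale subresource generically. A sketch of that usage, assuming the client-go scale interface of this vintage:

gr := schema.GroupResource{Group: "apps", Resource: "deployments"} // illustrative target
scale, err := f.ScalesGetter.Scales(ns).Get(gr, "demo")
if err == nil {
	scale.Spec.Replicas = 3
	_, err = f.ScalesGetter.Scales(ns).Update(gr, scale)
}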
102
vendor/k8s.io/kubernetes/test/e2e/framework/google_compute.go
generated
vendored
@ -22,84 +22,12 @@ import (
"io/ioutil"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
)
// TODO: These should really just use the GCE API client library or at least use
// better formatted output from the --format flag.
func CreateGCEStaticIP(name string) (string, error) {
// gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
// abshah@abhidesk:~/go/src/code.google.com/p/google-api-go-client/compute/v1$ gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
// Created [https://www.googleapis.com/compute/v1/projects/abshah-kubernetes-001/regions/us-central1/addresses/test-static-ip].
// NAME REGION ADDRESS STATUS
// test-static-ip us-central1 104.197.143.7 RESERVED
var outputBytes []byte
var err error
region, err := gce.GetGCERegion(TestContext.CloudConfig.Zone)
if err != nil {
return "", fmt.Errorf("failed to convert zone to region: %v", err)
}
glog.Infof("Creating static IP with name %q in project %q in region %q", name, TestContext.CloudConfig.ProjectID, region)
for attempts := 0; attempts < 4; attempts++ {
outputBytes, err = exec.Command("gcloud", "compute", "addresses", "create",
name, "--project", TestContext.CloudConfig.ProjectID,
"--region", region, "-q", "--format=yaml").CombinedOutput()
if err == nil {
break
}
glog.Errorf("output from failed attempt to create static IP: %s", outputBytes)
time.Sleep(time.Duration(5*attempts) * time.Second)
}
if err != nil {
// Ditch the error, since the stderr in the output is what actually contains
// any useful info.
return "", fmt.Errorf("failed to create static IP: %s", outputBytes)
}
output := string(outputBytes)
if strings.Contains(output, "RESERVED") {
r, _ := regexp.Compile("[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+")
staticIP := r.FindString(output)
if staticIP == "" {
return "", fmt.Errorf("static IP not found in gcloud command output: %v", output)
} else {
return staticIP, nil
}
} else {
return "", fmt.Errorf("static IP %q could not be reserved: %v", name, output)
}
}
func DeleteGCEStaticIP(name string) error {
// gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
// abshah@abhidesk:~/go/src/code.google.com/p/google-api-go-client/compute/v1$ gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
// Created [https://www.googleapis.com/compute/v1/projects/abshah-kubernetes-001/regions/us-central1/addresses/test-static-ip].
// NAME REGION ADDRESS STATUS
// test-static-ip us-central1 104.197.143.7 RESERVED
region, err := gce.GetGCERegion(TestContext.CloudConfig.Zone)
if err != nil {
return fmt.Errorf("failed to convert zone to region: %v", err)
}
glog.Infof("Deleting static IP with name %q in project %q in region %q", name, TestContext.CloudConfig.ProjectID, region)
outputBytes, err := exec.Command("gcloud", "compute", "addresses", "delete",
name, "--project", TestContext.CloudConfig.ProjectID,
"--region", region, "-q").CombinedOutput()
if err != nil {
// Ditch the error, since the stderr in the output is what actually contains
// any useful info.
return fmt.Errorf("failed to delete static IP %q: %v", name, string(outputBytes))
}
return nil
}
// Returns master & node image string, or error
func lookupClusterImageSources() (string, string, error) {
// Given args for a gcloud compute command, run it with other args, and return the values,
@ -196,3 +124,33 @@ func LogClusterImageSources() {
Logf("cluster images sources, could not write to %q: %v", filePath, err)
}
}
func CreateManagedInstanceGroup(size int64, zone, template string) error {
// TODO(verult): make this hit the compute API directly instead of
// shelling out to gcloud.
_, _, err := retryCmd("gcloud", "compute", "instance-groups", "managed",
"create",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
fmt.Sprintf("--zone=%s", zone),
TestContext.CloudConfig.NodeInstanceGroup,
fmt.Sprintf("--size=%d", size),
fmt.Sprintf("--template=%s", template))
if err != nil {
return fmt.Errorf("gcloud compute instance-groups managed create call failed with err: %v", err)
}
return nil
}
func DeleteManagedInstanceGroup(zone string) error {
// TODO(verult): make this hit the compute API directly instead of
// shelling out to gcloud.
_, _, err := retryCmd("gcloud", "compute", "instance-groups", "managed",
"delete",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
fmt.Sprintf("--zone=%s", zone),
TestContext.CloudConfig.NodeInstanceGroup)
if err != nil {
return fmt.Errorf("gcloud compute instance-groups managed delete call failed with err: %v", err)
}
return nil
}
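The create path above retries gcloud with a linear backoff before giving up. As a standalone illustration of that pattern (stdlib only; the package and helper names and the attempt count are made up for this sketch):

package gcloudutil

import (
	"fmt"
	"os/exec"
	"time"
)

// runWithRetry mirrors the loop in CreateGCEStaticIP: retry a command a few
// times, sleeping 0s, 5s, 10s, ... between failed attempts.
func runWithRetry(attempts int, name string, args ...string) ([]byte, error) {
	var out []byte
	var err error
	for i := 0; i < attempts; i++ {
		out, err = exec.Command(name, args...).CombinedOutput()
		if err == nil {
			return out, nil
		}
		time.Sleep(time.Duration(5*i) * time.Second)
	}
	return out, fmt.Errorf("command failed after %d attempts: %v", attempts, err)
}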
707
vendor/k8s.io/kubernetes/test/e2e/framework/ingress_utils.go
generated
vendored
File diff suppressed because it is too large
2
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/metrics_grabber.go
generated
vendored
@ -158,7 +158,7 @@ func (g *MetricsGrabber) GrabFromControllerManager() (ControllerManagerMetrics,
if !g.registeredMaster {
return ControllerManagerMetrics{}, fmt.Errorf("Master's Kubelet is not registered. Skipping ControllerManager's metrics gathering.")
}
output, err := g.getMetricsFromPod(g.client, fmt.Sprintf("%v-%v", "kube-controller-manager", g.masterName), metav1.NamespaceSystem, ports.ControllerManagerPort)
output, err := g.getMetricsFromPod(g.client, fmt.Sprintf("%v-%v", "kube-controller-manager", g.masterName), metav1.NamespaceSystem, ports.InsecureKubeControllerManagerPort)
if err != nil {
return ControllerManagerMetrics{}, err
}
34
vendor/k8s.io/kubernetes/test/e2e/framework/nodes_util.go
generated
vendored
@ -40,6 +40,15 @@ func EtcdUpgrade(target_storage, target_version string) error {
}
}
func IngressUpgrade(isUpgrade bool) error {
switch TestContext.Provider {
case "gce":
return ingressUpgradeGCE(isUpgrade)
default:
return fmt.Errorf("IngressUpgrade() is not implemented for provider %s", TestContext.Provider)
}
}
func MasterUpgrade(v string) error {
switch TestContext.Provider {
case "gce":
@ -58,12 +67,33 @@ func etcdUpgradeGCE(target_storage, target_version string) error {
os.Environ(),
"TEST_ETCD_VERSION="+target_version,
"STORAGE_BACKEND="+target_storage,
"TEST_ETCD_IMAGE=3.1.10")
"TEST_ETCD_IMAGE=3.2.14")
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-l", "-M")
return err
}
func ingressUpgradeGCE(isUpgrade bool) error {
var command string
if isUpgrade {
// User specified image to upgrade to.
targetImage := TestContext.IngressUpgradeImage
if targetImage != "" {
command = fmt.Sprintf("sudo sed -i -re 's|(image:)(.*)|\\1 %s|' /etc/kubernetes/manifests/glbc.manifest", targetImage)
} else {
// Upgrade to latest HEAD image.
command = "sudo sed -i -re 's/(image:)(.*)/\\1 gcr.io\\/k8s-ingress-image-push\\/ingress-gce-e2e-glbc-amd64:latest/' /etc/kubernetes/manifests/glbc.manifest"
}
} else {
// Downgrade to latest release image.
command = "sudo sed -i -re 's/(image:)(.*)/\\1 k8s.gcr.io\\/google_containers\\/glbc:0.9.7/' /etc/kubernetes/manifests/glbc.manifest"
}
// Kubelet should restart glbc automatically.
sshResult, err := NodeExec(GetMasterHost(), command)
LogSSHResult(sshResult)
return err
}
// TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default.
func MasterUpgradeGCEWithKubeProxyDaemonSet(v string, enableKubeProxyDaemonSet bool) error {
return masterUpgradeGCE(v, enableKubeProxyDaemonSet)
@ -77,7 +107,7 @@ func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error {
env = append(env,
"TEST_ETCD_VERSION="+TestContext.EtcdUpgradeVersion,
"STORAGE_BACKEND="+TestContext.EtcdUpgradeStorage,
"TEST_ETCD_IMAGE=3.1.10")
"TEST_ETCD_IMAGE=3.2.14")
} else {
// In e2e tests, we skip the confirmation prompt about
// implicit etcd upgrades to simulate the user entering "y".
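The sed expressions in ingressUpgradeGCE rewrite the image: line of the glbc static-pod manifest on the master. For readers who do not want to parse the escaped sed, a rough Go equivalent of that substitution; the sample manifest line is invented, and the snippet assumes only the stdlib regexp import:

// swapImage mimics the sed program s/(image:)(.*)/\1 NEWIMAGE/ used above.
func swapImage(manifestLine, newImage string) string {
	re := regexp.MustCompile(`(image:)(.*)`)
	return re.ReplaceAllString(manifestLine, "$1 "+newImage)
}

// swapImage("image: k8s.gcr.io/google_containers/glbc:0.9.7", "example.invalid/glbc:test")
// returns "image: example.invalid/glbc:test".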
187
vendor/k8s.io/kubernetes/test/e2e/framework/profile_gatherer.go
generated
vendored
Normal file
@ -0,0 +1,187 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"strings"
"sync"
"time"
)
const (
// Default value for how long the CPU profile is gathered for.
DefaultCPUProfileSeconds = 30
)
func getProfilesDirectoryPath() string {
return path.Join(TestContext.ReportDir, "profiles")
}
func createProfilesDirectoryIfNeeded() error {
profileDirPath := getProfilesDirectoryPath()
if _, err := os.Stat(profileDirPath); os.IsNotExist(err) {
if mkdirErr := os.Mkdir(profileDirPath, 0777); mkdirErr != nil {
return fmt.Errorf("Failed to create profiles dir: %v", mkdirErr)
}
} else if err != nil {
return fmt.Errorf("Failed to check existence of profiles dir: %v", err)
}
return nil
}
func checkProfileGatheringPrerequisites() error {
if !TestContext.AllowGatheringProfiles {
return fmt.Errorf("Can't gather profiles as --allow-gathering-profiles is false")
}
if TestContext.ReportDir == "" {
return fmt.Errorf("Can't gather profiles as --report-dir is empty")
}
if err := createProfilesDirectoryIfNeeded(); err != nil {
return fmt.Errorf("Failed to ensure profiles dir: %v", err)
}
return nil
}
func gatherProfileOfKind(profileBaseName, kind string) error {
// Get the profile data over SSH.
getCommand := fmt.Sprintf("curl -s localhost:8080/debug/pprof/%s", kind)
sshResult, err := SSH(getCommand, GetMasterHost()+":22", TestContext.Provider)
if err != nil {
return fmt.Errorf("Failed to execute curl command on master through SSH: %v", err)
}
// Write the data to a temp file.
var tmpfile *os.File
tmpfile, err = ioutil.TempFile("", "apiserver-profile")
if err != nil {
return fmt.Errorf("Failed to create temp file for profile data: %v", err)
}
defer os.Remove(tmpfile.Name())
if _, err := tmpfile.Write([]byte(sshResult.Stdout)); err != nil {
return fmt.Errorf("Failed to write temp file with profile data: %v", err)
}
if err := tmpfile.Close(); err != nil {
return fmt.Errorf("Failed to close temp file: %v", err)
}
// Create a graph from the data and write it to a pdf file.
var cmd *exec.Cmd
var profilePrefix string
switch {
// TODO: Support other profile kinds if needed (e.g inuse_space, alloc_objects, mutex, etc)
case kind == "heap":
cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", "--alloc_space", tmpfile.Name())
profilePrefix = "ApiserverMemoryProfile_"
case strings.HasPrefix(kind, "profile"):
cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", tmpfile.Name())
profilePrefix = "ApiserverCPUProfile_"
default:
return fmt.Errorf("Unknown profile kind provided: %s", kind)
}
outfilePath := path.Join(getProfilesDirectoryPath(), profilePrefix+profileBaseName+".pdf")
var outfile *os.File
outfile, err = os.Create(outfilePath)
if err != nil {
return fmt.Errorf("Failed to create file for the profile graph: %v", err)
}
defer outfile.Close()
cmd.Stdout = outfile
stderr := bytes.NewBuffer(nil)
cmd.Stderr = stderr
if err := cmd.Run(); nil != err {
return fmt.Errorf("Failed to run 'go tool pprof': %v, stderr: %#v", err, stderr.String())
}
return nil
}
// The below exposed functions can take a while to execute as they SSH to the master,
// collect and copy the profile over and then graph it. To allow waiting for these to
// finish before the parent goroutine itself finishes, we accept a sync.WaitGroup
// argument in these functions. Typically you would use the following pattern:
//
// func TestFooBar() {
// var wg sync.WaitGroup
// wg.Add(3)
// go framework.GatherApiserverCPUProfile(&wg, "doing_foo")
// go framework.GatherApiserverMemoryProfile(&wg, "doing_foo")
// <<<< some code doing foo >>>>>>
// go framework.GatherApiserverCPUProfile(&wg, "doing_bar")
// <<<< some code doing bar >>>>>>
// wg.Wait()
// }
//
// If you do not wish to exercise the waiting logic, pass a nil value for the
// waitgroup argument instead. However, then you would be responsible for ensuring
// that the function finishes.
func GatherApiserverCPUProfile(wg *sync.WaitGroup, profileBaseName string) {
GatherApiserverCPUProfileForNSeconds(wg, profileBaseName, DefaultCPUProfileSeconds)
}
func GatherApiserverCPUProfileForNSeconds(wg *sync.WaitGroup, profileBaseName string, n int) {
if wg != nil {
defer wg.Done()
}
if err := checkProfileGatheringPrerequisites(); err != nil {
Logf("Profile gathering pre-requisite failed: %v", err)
return
}
if profileBaseName == "" {
profileBaseName = time.Now().Format(time.RFC3339)
}
if err := gatherProfileOfKind(profileBaseName, fmt.Sprintf("profile?seconds=%v", n)); err != nil {
Logf("Failed to gather apiserver CPU profile: %v", err)
}
}
func GatherApiserverMemoryProfile(wg *sync.WaitGroup, profileBaseName string) {
if wg != nil {
defer wg.Done()
}
if err := checkProfileGatheringPrerequisites(); err != nil {
Logf("Profile gathering pre-requisite failed: %v", err)
return
}
if profileBaseName == "" {
profileBaseName = time.Now().Format(time.RFC3339)
}
if err := gatherProfileOfKind(profileBaseName, "heap"); err != nil {
Logf("Failed to gather apiserver memory profile: %v", err)
}
}
// StartApiserverCPUProfileGatherer is a polling-based gatherer of the apiserver's
// CPU profile. It takes the delay b/w consecutive gatherings as an argument and
// starts the gathering goroutine. To stop the gatherer, close the returned channel.
func StartApiserverCPUProfileGatherer(delay time.Duration) chan struct{} {
stopCh := make(chan struct{})
go func() {
for {
select {
case <-time.After(delay):
GatherApiserverCPUProfile(nil, "")
case <-stopCh:
return
}
}
}()
return stopCh
}
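Besides the WaitGroup pattern documented in the file's own comment, the polling gatherer is stop-channel driven. A minimal usage sketch built only from the signatures above; the 5-minute cadence is an arbitrary choice for illustration:

// In suite setup: sample the apiserver CPU profile every 5 minutes.
stopCh := StartApiserverCPUProfileGatherer(5 * time.Minute)
// ... run the suite ...
close(stopCh) // stops the background gatherer goroutine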
52
vendor/k8s.io/kubernetes/test/e2e/framework/psp_util.go
generated
vendored
@ -114,30 +114,34 @@ func CreatePrivilegedPSPBinding(f *Framework, namespace string) {
psp, err = f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().Create(psp)
ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
// Create the Role to bind it to the namespace.
_, err = f.ClientSet.RbacV1beta1().ClusterRoles().Create(&rbacv1beta1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged},
Rules: []rbacv1beta1.PolicyRule{{
APIGroups: []string{"extensions"},
Resources: []string{"podsecuritypolicies"},
ResourceNames: []string{podSecurityPolicyPrivileged},
Verbs: []string{"use"},
}},
})
ExpectNoError(err, "Failed to create PSP role")
if IsRBACEnabled(f) {
// Create the Role to bind it to the namespace.
_, err = f.ClientSet.RbacV1beta1().ClusterRoles().Create(&rbacv1beta1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged},
Rules: []rbacv1beta1.PolicyRule{{
APIGroups: []string{"extensions"},
Resources: []string{"podsecuritypolicies"},
ResourceNames: []string{podSecurityPolicyPrivileged},
Verbs: []string{"use"},
}},
})
ExpectNoError(err, "Failed to create PSP role")
}
})
By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s",
podSecurityPolicyPrivileged, namespace))
BindClusterRoleInNamespace(f.ClientSet.RbacV1beta1(),
podSecurityPolicyPrivileged,
namespace,
rbacv1beta1.Subject{
Kind: rbacv1beta1.ServiceAccountKind,
Namespace: namespace,
Name: "default",
})
ExpectNoError(WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(),
serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged,
schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true))
if IsRBACEnabled(f) {
By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s",
podSecurityPolicyPrivileged, namespace))
BindClusterRoleInNamespace(f.ClientSet.RbacV1beta1(),
podSecurityPolicyPrivileged,
namespace,
rbacv1beta1.Subject{
Kind: rbacv1beta1.ServiceAccountKind,
Namespace: namespace,
Name: "default",
})
ExpectNoError(WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(),
serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged,
schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true))
}
}
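Tying this to the CreateNamespace hunk earlier: the privileged binding is applied automatically per namespace, and a suite that manages PSPs itself can opt out via the flag shown there. A two-line sketch, assuming the usual framework constructor; the suite name is made up:

f := NewDefaultFramework("demo")   // hypothetical suite name
f.SkipPrivilegedPSPBinding = true  // suppress the automatic PSP binding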
69
vendor/k8s.io/kubernetes/test/e2e/framework/pv_util.go
generated
vendored
@ -36,10 +36,10 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/pkg/volume/util"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
@ -80,7 +80,8 @@ type PersistentVolumeConfig struct {
NamePrefix string
Labels labels.Set
StorageClassName string
NodeAffinity *v1.NodeAffinity
NodeAffinity *v1.VolumeNodeAffinity
VolumeMode *v1.PersistentVolumeMode
}
// PersistentVolumeClaimConfig is consumed by MakePersistentVolumeClaim() to generate a PVC object.
@ -92,6 +93,7 @@ type PersistentVolumeClaimConfig struct {
Annotations map[string]string
Selector *metav1.LabelSelector
StorageClassName *string
VolumeMode *v1.PersistentVolumeMode
}
// Clean up a pv and pvc in a single pv/pvc test case.
@ -181,7 +183,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
// Wait for the PV's phase to return to be `expectPVPhase`
Logf("Waiting for reclaim process to complete.")
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, 1*time.Second, 300*time.Second)
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, Poll, PVReclaimingTimeout)
if err != nil {
return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
}
@ -262,6 +264,11 @@ func createPV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVol
return pv, nil
}
// create the PV resource. Fails test on error.
func CreatePV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
return createPV(c, pv)
}
// create the PVC resource. Fails test on error.
func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
@ -391,14 +398,14 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
// Wait for newly created PVC to bind to the PV
Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, Poll, ClaimBindingTimeout)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
}
// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
// since the PVC is already bound.
err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, Poll, PVBindingTimeout)
if err != nil {
return fmt.Errorf("PV %q did not become Bound: %v", pv.Name, err)
}
@ -444,7 +451,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
}
for pvName := range pvols {
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, 3*time.Second, 180*time.Second)
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, Poll, PVBindingTimeout)
if err != nil && len(pvols) > len(claims) {
Logf("WARN: pv %v is not bound after max wait", pvName)
Logf(" This may be ok since there are more pvs than pvcs")
@ -467,7 +474,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey)
}
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, 3*time.Second, 180*time.Second)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, Poll, ClaimBindingTimeout)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", cr.Name, err)
}
@ -576,12 +583,12 @@ func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume
Namespace: pvConfig.Prebind.Namespace,
}
}
pv := &v1.PersistentVolume{
return &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
GenerateName: pvConfig.NamePrefix,
Labels: pvConfig.Labels,
Annotations: map[string]string{
volumehelper.VolumeGidAnnotationKey: "777",
util.VolumeGidAnnotationKey: "777",
},
},
Spec: v1.PersistentVolumeSpec{
@ -597,14 +604,10 @@ func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume
},
ClaimRef: claimRef,
StorageClassName: pvConfig.StorageClassName,
NodeAffinity: pvConfig.NodeAffinity,
VolumeMode: pvConfig.VolumeMode,
},
}
err := helper.StorageNodeAffinityToAlphaAnnotation(pv.Annotations, pvConfig.NodeAffinity)
if err != nil {
Logf("Setting storage node affinity failed: %v", err)
return nil
}
return pv
}
// Returns a PVC definition based on the namespace.
@ -634,6 +637,7 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
},
},
StorageClassName: cfg.StorageClassName,
VolumeMode: cfg.VolumeMode,
},
}
}
@ -856,7 +860,7 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bo
Containers: []v1.Container{
{
Name: "write-pod",
Image: "gcr.io/google_containers/busybox:1.24",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
SecurityContext: &v1.SecurityContext{
@ -867,14 +871,21 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bo
RestartPolicy: v1.RestartPolicyOnFailure,
},
}
var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
var volumeMounts = make([]v1.VolumeMount, 0)
var volumeDevices = make([]v1.VolumeDevice, 0)
var volumes = make([]v1.Volume, len(pvclaims))
for index, pvclaim := range pvclaims {
volumename := fmt.Sprintf("volume%v", index+1)
volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
if pvclaim.Spec.VolumeMode != nil && *pvclaim.Spec.VolumeMode == v1.PersistentVolumeBlock {
volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: "/mnt/" + volumename})
} else {
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
}
volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
}
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
podSpec.Spec.Containers[0].VolumeDevices = volumeDevices
podSpec.Spec.Volumes = volumes
podSpec.Spec.SecurityContext.SELinuxOptions = seLinuxLabel
return podSpec
@ -925,6 +936,26 @@ func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeC
return CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
}
// CreateUnschedulablePod with given claims based on node selector
func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(pod)
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
}
// Waiting for pod to become Unschedulable
err = WaitForPodNameUnschedulableInNamespace(client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Unschedulable: %v", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
}
return pod, nil
}
// wait until all pvcs phase set to bound
func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim, timeout time.Duration) ([]*v1.PersistentVolume, error) {
persistentvolumes := make([]*v1.PersistentVolume, len(pvclaims))
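The VolumeMode plumbing added above lets tests request raw block claims, which MakeSecPod then attaches as VolumeDevices instead of VolumeMounts. A minimal sketch of the config side, using only types from this hunk; the StorageClass name and namespace are placeholders:

block := v1.PersistentVolumeBlock
scName := "fast" // hypothetical StorageClass
pvcConfig := PersistentVolumeClaimConfig{
	StorageClassName: &scName,
	VolumeMode:       &block, // claim will surface as a VolumeDevice in MakeSecPod
}
pvc := MakePersistentVolumeClaim(pvcConfig, "demo-ns")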
11
vendor/k8s.io/kubernetes/test/e2e/framework/rc_util.go
generated
vendored
@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
"k8s.io/kubernetes/pkg/api/testapi"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@ -84,7 +85,9 @@ func RcByNameContainer(name string, replicas int32, image string, labels map[str
// ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till
// none are running, otherwise it does what a synchronous scale operation would do.
func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, ns string, l map[string]string, replicas uint) error {
//TODO(p0lyn0mial): remove internalClientset.
//TODO(p0lyn0mial): update the callers.
func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns string, l map[string]string, replicas uint) error {
listOpts := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()}
rcs, err := clientset.CoreV1().ReplicationControllers(ns).List(listOpts)
if err != nil {
@ -96,7 +99,7 @@ func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalcl
Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
for _, labelRC := range rcs.Items {
name := labelRC.Name
if err := ScaleRC(clientset, internalClientset, ns, name, replicas, false); err != nil {
if err := ScaleRC(clientset, internalClientset, scalesGetter, ns, name, replicas, false); err != nil {
return err
}
rc, err := clientset.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
@ -156,8 +159,8 @@ func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalcl
return DeleteResourceAndPods(clientset, internalClientset, api.Kind("ReplicationController"), ns, name)
}
func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, internalClientset, ns, name, size, wait, api.Kind("ReplicationController"))
func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, internalClientset, scalesGetter, ns, name, size, wait, api.Kind("ReplicationController"), api.Resource("replicationcontrollers"))
}
func RunRC(config testutils.RCConfig) error {
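A caller updated for the widened signature now threads a ScalesGetter through. A sketch with placeholder names; f is assumed to be the usual framework instance carrying ClientSet, InternalClientset, and the ScalesGetter wired up earlier:

err := ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter,
	f.Namespace.Name, "demo-rc", 3, true /* wait for completion */)
ExpectNoError(err, "failed to scale demo-rc")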
10
vendor/k8s.io/kubernetes/test/e2e/framework/resource_usage_gatherer.go
generated
vendored
@ -191,7 +191,7 @@ func (w *resourceGatherWorker) gather(initialSleep time.Duration) {
}
}
type containerResourceGatherer struct {
type ContainerResourceGatherer struct {
client clientset.Interface
stopCh chan struct{}
workers []resourceGatherWorker
@ -208,8 +208,8 @@ type ResourceGathererOptions struct {
PrintVerboseLogs bool
}
func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*containerResourceGatherer, error) {
g := containerResourceGatherer{
func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) {
g := ContainerResourceGatherer{
client: c,
stopCh: make(chan struct{}),
containerIDs: make([]string, 0),
@ -277,7 +277,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
// StartGatheringData starts a stat gathering worker blocks for each node to track,
// and blocks until StopAndSummarize is called.
func (g *containerResourceGatherer) StartGatheringData() {
func (g *ContainerResourceGatherer) StartGatheringData() {
if len(g.workers) == 0 {
return
}
@ -294,7 +294,7 @@ func (g *containerResourceGatherer) StartGatheringData() {
// generates resource summary for the passed-in percentiles, and returns the summary.
// It returns an error if the resource usage at any percentile is beyond the
// specified resource constraints.
func (g *containerResourceGatherer) StopAndSummarize(percentiles []int, constraints map[string]ResourceConstraint) (*ResourceUsageSummary, error) {
func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constraints map[string]ResourceConstraint) (*ResourceUsageSummary, error) {
close(g.stopCh)
Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
finished := make(chan struct{})
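With the gatherer type now exported, a suite can drive it directly. A rough sketch built from the signatures above; the empty options, percentile list, and nil constraints are illustrative choices:

gatherer, err := NewResourceUsageGatherer(c, ResourceGathererOptions{}, nil)
if err != nil {
	Failf("failed to create gatherer: %v", err)
}
go gatherer.StartGatheringData() // blocks until StopAndSummarize is called
// ... exercise the cluster ...
summary, err := gatherer.StopAndSummarize([]int{50, 90, 99}, nil)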
49
vendor/k8s.io/kubernetes/test/e2e/framework/rs_util.go
generated
vendored
@ -26,6 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
extensionsclient "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
testutils "k8s.io/kubernetes/test/utils"
)
@ -70,6 +71,54 @@ func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
return err
}
// WaitForReplicaSetDesiredReplicas waits until the replicaset has desired number of replicas.
func WaitForReplicaSetDesiredReplicas(rsClient extensionsclient.ReplicaSetsGetter, replicaSet *extensions.ReplicaSet) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.Replicas == *(replicaSet.Spec.Replicas) && rs.Status.Replicas == *(rs.Spec.Replicas), nil
})
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("replicaset %q never had desired number of replicas", replicaSet.Name)
}
return err
}
// WaitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a RS to equal targetReplicaNum
func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *extensions.ReplicaSet, targetReplicaNum int32) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
rs, err := c.ExtensionsV1beta1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return rs.Status.ObservedGeneration >= desiredGeneration && *rs.Spec.Replicas == targetReplicaNum, nil
})
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("replicaset %q never had desired number of .spec.replicas", replicaSet.Name)
}
return err
}
// WaitForReplicaSetTargetAvailableReplicas waits for .status.availableReplicas of a RS to equal targetReplicaNum
func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet *extensions.ReplicaSet, targetReplicaNum int32) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
rs, err := c.ExtensionsV1beta1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.AvailableReplicas == targetReplicaNum, nil
})
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("replicaset %q never had desired number of .status.availableReplicas", replicaSet.Name)
}
return err
}
func RunReplicaSet(config testutils.ReplicaSetConfig) error {
By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = DumpNodeDebugInfo
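All three new helpers share the same generation-gated polling shape: re-read the ReplicaSet, require ObservedGeneration to catch up, then compare the target field. Once a test holds a ReplicaSet object, usage is a one-liner; rs and the target count here are placeholders:

if err := WaitForReplicaSetTargetAvailableReplicas(c, rs, 3); err != nil {
	Failf("replicas never became available: %v", err)
}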
18
vendor/k8s.io/kubernetes/test/e2e/framework/service_util.go
generated
vendored
@ -802,7 +802,7 @@ func newEchoServerPodSpec(podName string) *v1.Pod {
Containers: []v1.Container{
{
Name: "echoserver",
Image: "gcr.io/google_containers/echoserver:1.6",
Image: imageutils.GetE2EImage(imageutils.EchoServer),
Ports: []v1.ContainerPort{{ContainerPort: int32(port)}},
},
},
@ -1105,22 +1105,6 @@ func GetContainerPortsByPodUID(endpoints *v1.Endpoints) PortsByPodUID {
for _, port := range ss.Ports {
for _, addr := range ss.Addresses {
containerPort := port.Port
hostPort := port.Port
// use endpoint annotations to recover the container port in a Mesos setup
// compare contrib/mesos/pkg/service/endpoints_controller.syncService
key := fmt.Sprintf("k8s.mesosphere.io/containerPort_%s_%s_%d", port.Protocol, addr.IP, hostPort)
mesosContainerPortString := endpoints.Annotations[key]
if mesosContainerPortString != "" {
mesosContainerPort, err := strconv.Atoi(mesosContainerPortString)
if err != nil {
continue
}
containerPort = int32(mesosContainerPort)
Logf("Mapped mesos host port %d to container port %d via annotation %s=%s", hostPort, containerPort, key, mesosContainerPortString)
}
// Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort)
if _, ok := m[addr.TargetRef.UID]; !ok {
m[addr.TargetRef.UID] = make([]int, 0)
}
30
vendor/k8s.io/kubernetes/test/e2e/framework/statefulset_utils.go
generated
vendored
@ -28,7 +28,7 @@ import (
. "github.com/onsi/gomega"
apps "k8s.io/api/apps/v1beta1"
apps "k8s.io/api/apps/v1"
appsV1beta2 "k8s.io/api/apps/v1beta2"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
@ -83,7 +83,7 @@ func NewStatefulSetTester(c clientset.Interface) *StatefulSetTester {
// GetStatefulSet gets the StatefulSet named name in namespace.
func (s *StatefulSetTester) GetStatefulSet(namespace, name string) *apps.StatefulSet {
ss, err := s.c.AppsV1beta1().StatefulSets(namespace).Get(name, metav1.GetOptions{})
ss, err := s.c.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
Failf("Failed to get StatefulSet %s/%s: %v", namespace, name, err)
}
@ -108,7 +108,7 @@ func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *apps.Sta
Expect(err).NotTo(HaveOccurred())
Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector))
_, err = s.c.AppsV1beta1().StatefulSets(ns).Create(ss)
_, err = s.c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
s.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
return ss
@ -245,12 +245,12 @@ func (s *StatefulSetTester) Restart(ss *apps.StatefulSet) {
func (s *StatefulSetTester) update(ns, name string, update func(ss *apps.StatefulSet)) *apps.StatefulSet {
for i := 0; i < 3; i++ {
ss, err := s.c.AppsV1beta1().StatefulSets(ns).Get(name, metav1.GetOptions{})
ss, err := s.c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
Failf("failed to get statefulset %q: %v", name, err)
}
update(ss)
ss, err = s.c.AppsV1beta1().StatefulSets(ns).Update(ss)
ss, err = s.c.AppsV1().StatefulSets(ns).Update(ss)
if err == nil {
return ss
}
@ -328,7 +328,7 @@ func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, s
func (s *StatefulSetTester) WaitForState(ss *apps.StatefulSet, until func(*apps.StatefulSet, *v1.PodList) (bool, error)) {
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
func() (bool, error) {
ssGet, err := s.c.AppsV1beta1().StatefulSets(ss.Namespace).Get(ss.Name, metav1.GetOptions{})
ssGet, err := s.c.AppsV1().StatefulSets(ss.Namespace).Get(ss.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -344,7 +344,7 @@ func (s *StatefulSetTester) WaitForState(ss *apps.StatefulSet, until func(*apps.
// The returned StatefulSet contains such a StatefulSetStatus
func (s *StatefulSetTester) WaitForStatus(set *apps.StatefulSet) *apps.StatefulSet {
s.WaitForState(set, func(set2 *apps.StatefulSet, pods *v1.PodList) (bool, error) {
if set2.Status.ObservedGeneration != nil && *set2.Status.ObservedGeneration >= set.Generation {
if set2.Status.ObservedGeneration >= set.Generation {
set = set2
return true, nil
}
@ -613,11 +613,11 @@ func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *apps.StatefulSet, exp
ns, name := ss.Namespace, ss.Name
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
func() (bool, error) {
ssGet, err := s.c.AppsV1beta1().StatefulSets(ns).Get(name, metav1.GetOptions{})
ssGet, err := s.c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
if *ssGet.Status.ObservedGeneration < ss.Generation {
if ssGet.Status.ObservedGeneration < ss.Generation {
return false, nil
}
if ssGet.Status.ReadyReplicas != expectedReplicas {
@ -638,11 +638,11 @@ func (s *StatefulSetTester) WaitForStatusReplicas(ss *apps.StatefulSet, expected
ns, name := ss.Namespace, ss.Name
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
func() (bool, error) {
ssGet, err := s.c.AppsV1beta1().StatefulSets(ns).Get(name, metav1.GetOptions{})
ssGet, err := s.c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
if *ssGet.Status.ObservedGeneration < ss.Generation {
if ssGet.Status.ObservedGeneration < ss.Generation {
return false, nil
}
if ssGet.Status.Replicas != expectedReplicas {
@ -676,7 +676,7 @@ func (s *StatefulSetTester) SortStatefulPods(pods *v1.PodList) {
// DeleteAllStatefulSets deletes all StatefulSet API Objects in Namespace ns.
func DeleteAllStatefulSets(c clientset.Interface, ns string) {
sst := &StatefulSetTester{c: c}
ssList, err := c.AppsV1beta1().StatefulSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
ssList, err := c.AppsV1().StatefulSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
ExpectNoError(err)
// Scale down each statefulset, then delete it completely.
@ -692,7 +692,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
Logf("Deleting statefulset %v", ss.Name)
// Use OrphanDependents=false so it's deleted synchronously.
// We already made sure the Pods are gone inside Scale().
if err := c.AppsV1beta1().StatefulSets(ss.Namespace).Delete(ss.Name, &metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil {
if err := c.AppsV1().StatefulSets(ss.Namespace).Delete(ss.Name, &metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil {
errList = append(errList, fmt.Sprintf("%v", err))
}
}
@ -790,7 +790,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
return &apps.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
APIVersion: "apps/v1beta1",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
@ -872,7 +872,7 @@ func (sp statefulPodsByOrdinal) Less(i, j int) bool {
type updateStatefulSetFunc func(*apps.StatefulSet)
func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *apps.StatefulSet, err error) {
statefulSets := c.AppsV1beta1().StatefulSets(namespace)
statefulSets := c.AppsV1().StatefulSets(namespace)
var updateErr error
pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if statefulSet, err = statefulSets.Get(name, metav1.GetOptions{}); err != nil {
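The migration above is mechanical: every AppsV1beta1() call site becomes AppsV1(), and Status.ObservedGeneration is a plain int64 in apps/v1 rather than a pointer, which is why the nil checks and dereferences are dropped. A minimal sketch against the v1 client; the namespace and StatefulSet name are placeholders:

ss, err := c.AppsV1().StatefulSets("demo-ns").Get("demo-ss", metav1.GetOptions{})
if err != nil {
	Failf("get failed: %v", err)
}
// No pointer dereference needed any more in apps/v1.
upToDate := ss.Status.ObservedGeneration >= ss.Generation
Logf("statefulset up to date: %v", upToDate)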
37
vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go
generated
vendored
@ -26,6 +26,7 @@ import (
"github.com/golang/glog"
"github.com/onsi/ginkgo/config"
"github.com/spf13/viper"
utilflag "k8s.io/apiserver/pkg/util/flag"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@ -57,14 +58,17 @@ type TestContextType struct {
Prefix string
MinStartupPods int
// Timeout for waiting for system pods to be running
SystemPodsStartupTimeout time.Duration
UpgradeTarget string
EtcdUpgradeStorage string
EtcdUpgradeVersion string
UpgradeImage string
GCEUpgradeScript string
ContainerRuntime string
ContainerRuntimeEndpoint string
SystemPodsStartupTimeout time.Duration
UpgradeTarget string
EtcdUpgradeStorage string
EtcdUpgradeVersion string
IngressUpgradeImage string
UpgradeImage string
GCEUpgradeScript string
ContainerRuntime string
ContainerRuntimeEndpoint string
ContainerRuntimeProcessName string
ContainerRuntimePidFile string
// SystemdServices are comma separated list of systemd services the test framework
// will dump logs for.
SystemdServices string
@ -83,6 +87,7 @@ type TestContextType struct {
GatherLogsSizes bool
GatherMetricsAfterTest string
GatherSuiteMetricsAfterTest bool
AllowGatheringProfiles bool
// If set to 'true' framework will gather ClusterAutoscaler metrics when gathering them for other components.
IncludeClusterAutoscalerMetrics bool
// Currently supported values are 'hr' for human-readable and 'json'. It's a comma separated list.
@ -101,8 +106,8 @@ type TestContextType struct {
LogexporterGCSPath string
// If the garbage collector is enabled in the kube-apiserver and kube-controller-manager.
GarbageCollectorEnabled bool
// FeatureGates is a set of key=value pairs that describe feature gates for alpha/experimental features.
FeatureGates string
// featureGates is a map of feature names to bools that enable or disable alpha/experimental features.
FeatureGates map[string]bool
// Node e2e specific test context
NodeTestContextType
// Monitoring solution that is used in current cluster.
@ -188,6 +193,7 @@ func RegisterCommonFlags() {
flag.BoolVar(&TestContext.GatherLogsSizes, "gather-logs-sizes", false, "If set to true framework will be monitoring logs sizes on all machines running e2e tests.")
flag.StringVar(&TestContext.GatherMetricsAfterTest, "gather-metrics-at-teardown", "false", "If set to 'true' framework will gather metrics from all components after each test. If set to 'master' only master component metrics would be gathered.")
flag.BoolVar(&TestContext.GatherSuiteMetricsAfterTest, "gather-suite-metrics-at-teardown", false, "If set to true framwork will gather metrics from all components after the whole test suite completes.")
flag.BoolVar(&TestContext.AllowGatheringProfiles, "allow-gathering-profiles", true, "If set to true framework will allow to gather CPU/memory allocation pprof profiles from the master.")
flag.BoolVar(&TestContext.IncludeClusterAutoscalerMetrics, "include-cluster-autoscaler", false, "If set to true, framework will include Cluster Autoscaler when gathering metrics.")
flag.StringVar(&TestContext.OutputPrintType, "output-print-type", "json", "Format in which summaries should be printed: 'hr' for human readable, 'json' for JSON ones.")
flag.BoolVar(&TestContext.DumpLogsOnFailure, "dump-logs-on-failure", true, "If set to true test will dump data about the namespace in which test was running.")
@ -200,10 +206,12 @@ func RegisterCommonFlags() {
flag.StringVar(&TestContext.Host, "host", "", fmt.Sprintf("The host, or apiserver, to connect to. Will default to %s if this argument and --kubeconfig are not set", defaultHost))
flag.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.")
flag.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
flag.StringVar(&TestContext.FeatureGates, "feature-gates", "", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
flag.Var(utilflag.NewMapStringBool(&TestContext.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
flag.StringVar(&TestContext.Viper, "viper-config", "e2e", "The name of the viper config i.e. 'e2e' will read values from 'e2e.json' locally. All e2e parameters are meant to be configurable by viper.")
flag.StringVar(&TestContext.ContainerRuntime, "container-runtime", "docker", "The container runtime of cluster VM instances (docker/rkt/remote).")
flag.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "", "The container runtime endpoint of cluster VM instances.")
flag.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "unix:///var/run/dockershim.sock", "The container runtime endpoint of cluster VM instances.")
flag.StringVar(&TestContext.ContainerRuntimeProcessName, "container-runtime-process-name", "dockerd", "The name of the container runtime process.")
flag.StringVar(&TestContext.ContainerRuntimePidFile, "container-runtime-pid-file", "/var/run/docker.pid", "The pid file of the container runtime.")
flag.StringVar(&TestContext.SystemdServices, "systemd-services", "docker", "The comma separated list of systemd services the framework will dump logs for.")
flag.StringVar(&TestContext.ImageServiceEndpoint, "image-service-endpoint", "", "The image service endpoint of cluster VM instances.")
flag.StringVar(&TestContext.DockershimCheckpointDir, "dockershim-checkpoint-dir", "/var/lib/dockershim/sandbox", "The directory for dockershim to store sandbox checkpoints.")
@ -221,7 +229,7 @@ func RegisterClusterFlags() {
flag.StringVar(&TestContext.KubeVolumeDir, "volume-dir", "/var/lib/kubelet", "Path to the directory containing the kubelet volumes.")
flag.StringVar(&TestContext.CertDir, "cert-dir", "", "Path to the directory containing the certs. Default is empty, which doesn't use certs.")
flag.StringVar(&TestContext.RepoRoot, "repo-root", "../../", "Root directory of kubernetes repository, for finding test files.")
flag.StringVar(&TestContext.Provider, "provider", "", "The name of the Kubernetes provider (gce, gke, local, vagrant, etc.)")
flag.StringVar(&TestContext.Provider, "provider", "", "The name of the Kubernetes provider (gce, gke, local, etc.)")
flag.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")
flag.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.")
flag.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.")
@ -254,6 +262,7 @@ func RegisterClusterFlags() {
flag.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
flag.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
flag.StringVar(&TestContext.UpgradeImage, "upgrade-image", "", "Image to upgrade to (e.g. 'container_vm' or 'gci') if doing an upgrade test.")
flag.StringVar(&TestContext.IngressUpgradeImage, "ingress-upgrade-image", "", "Image to upgrade to if doing an upgrade test for ingress.")
flag.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.")
flag.BoolVar(&TestContext.CleanStart, "clean-start", false, "If true, purge all namespaces except default and system before running tests. This serves to Cleanup test namespaces from failed/interrupted e2e runs in a long-lived cluster.")
flag.BoolVar(&TestContext.GarbageCollectorEnabled, "garbage-collector-enabled", true, "Set to true if the garbage collector is enabled in the kube-apiserver and kube-controller-manager, then some tests will rely on the garbage collector to delete dependent resources.")
@ -292,7 +301,7 @@ func ViperizeFlags() {
viper.AddConfigPath(".")
viper.ReadInConfig()
// TODO Consider wether or not we want to use overwriteFlagsWithViperConfig().
// TODO Consider whether or not we want to use overwriteFlagsWithViperConfig().
viper.Unmarshal(&TestContext)
AfterReadingAllFlags(&TestContext)
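Because FeatureGates is now a map[string]bool bound via utilflag.NewMapStringBool, the comma-separated flag value is parsed into the map for you. A sketch of the same binding on a private FlagSet, assuming the k8s.io/apiserver/pkg/util/flag package of this vintage; the gate names are examples only:

var gates map[string]bool
fs := flag.NewFlagSet("demo", flag.ContinueOnError)
fs.Var(utilflag.NewMapStringBool(&gates), "feature-gates", "key=value feature gate pairs")
_ = fs.Parse([]string{"--feature-gates=BlockVolume=true,MountPropagation=false"})
// gates == map[string]bool{"BlockVolume": true, "MountPropagation": false}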
3
vendor/k8s.io/kubernetes/test/e2e/framework/timer/BUILD
generated
vendored
@ -14,8 +14,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["timer_test.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/timer",
library = ":go_default_library",
embed = [":go_default_library"],
deps = ["//vendor/github.com/onsi/gomega:go_default_library"],
)
251
vendor/k8s.io/kubernetes/test/e2e/framework/util.go
generated
vendored
@ -33,7 +33,6 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
goruntime "runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -51,6 +50,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
gomegatypes "github.com/onsi/gomega/types"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
batch "k8s.io/api/batch/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
@ -62,6 +62,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilnet "k8s.io/apimachinery/pkg/util/net"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
@ -75,6 +76,7 @@ import (
|
||||
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
scaleclient "k8s.io/client-go/scale"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
@ -87,17 +89,18 @@ import (
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
|
||||
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
nodectlr "k8s.io/kubernetes/pkg/controller/node"
|
||||
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubectl"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/format"
|
||||
"k8s.io/kubernetes/pkg/master/ports"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
|
||||
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
|
||||
sshutil "k8s.io/kubernetes/pkg/ssh"
|
||||
"k8s.io/kubernetes/pkg/util/system"
|
||||
taintutils "k8s.io/kubernetes/pkg/util/taints"
|
||||
utilversion "k8s.io/kubernetes/pkg/util/version"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
|
||||
testutil "k8s.io/kubernetes/test/utils"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
@ -154,9 +157,20 @@ const (
|
||||
// How long claims have to become dynamically provisioned
|
||||
ClaimProvisionTimeout = 5 * time.Minute
|
||||
|
||||
// When these values are updated, also update cmd/kubelet/app/options/options.go
|
||||
currentPodInfraContainerImageName = "gcr.io/google_containers/pause"
|
||||
currentPodInfraContainerImageVersion = "3.0"
|
||||
// How long claims have to become bound
|
||||
ClaimBindingTimeout = 3 * time.Minute
|
||||
|
||||
// How long claims have to become deleted
|
||||
ClaimDeletingTimeout = 3 * time.Minute
|
||||
|
||||
// How long PVs have to beome reclaimed
|
||||
PVReclaimingTimeout = 3 * time.Minute
|
||||
|
||||
// How long PVs have to become bound
|
||||
PVBindingTimeout = 3 * time.Minute
|
||||
|
||||
// How long PVs have to become deleted
|
||||
PVDeletingTimeout = 3 * time.Minute
|
||||
|
||||
// How long a node is allowed to become "Ready" after it is restarted before
|
||||
// the test is considered failed.
|
||||
@ -230,13 +244,7 @@ func GetServerArchitecture(c clientset.Interface) string {

// GetPauseImageName fetches the pause image name for the same architecture as the apiserver.
func GetPauseImageName(c clientset.Interface) string {
return currentPodInfraContainerImageName + "-" + GetServerArchitecture(c) + ":" + currentPodInfraContainerImageVersion
}

// GetPauseImageNameForHostArch fetches the pause image name for the same architecture the test is running on.
// TODO: move this function to the test/utils
func GetPauseImageNameForHostArch() string {
return currentPodInfraContainerImageName + "-" + goruntime.GOARCH + ":" + currentPodInfraContainerImageVersion
return imageutils.GetE2EImageWithArch(imageutils.Pause, GetServerArchitecture(c))
}

func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) {
@ -331,6 +339,16 @@ func SkipUnlessProviderIs(supportedProviders ...string) {
}
}

func SkipUnlessMultizone(c clientset.Interface) {
zones, err := GetClusterZones(c)
if err != nil {
Skipf("Error listing cluster zones")
}
if zones.Len() <= 1 {
Skipf("Requires more than one zone")
}
}

func SkipUnlessClusterMonitoringModeIs(supportedMonitoring ...string) {
if !ClusterMonitoringModeIs(supportedMonitoring...) {
Skipf("Only next monitoring modes are supported %v (not %s)", supportedMonitoring, TestContext.ClusterMonitoringMode)
@ -892,6 +910,26 @@ func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.In
return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout)
}

// WaitForStatefulSetReplicasReady waits for all replicas of a StatefulSet to become ready or until timeout occurs, whichever comes first.
func WaitForStatefulSetReplicasReady(statefulSetName, ns string, c clientset.Interface, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for StatefulSet %s to have all replicas ready", timeout, statefulSetName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
sts, err := c.AppsV1().StatefulSets(ns).Get(statefulSetName, metav1.GetOptions{})
if err != nil {
Logf("Get StatefulSet %s failed, ignoring for %v: %v", statefulSetName, Poll, err)
continue
} else {
if sts.Status.ReadyReplicas == *sts.Spec.Replicas {
Logf("All %d replicas of StatefulSet %s are ready. (%v)", sts.Status.ReadyReplicas, statefulSetName, time.Since(start))
return nil
} else {
Logf("StatefulSet %s found but there are %d ready replicas and %d total replicas.", statefulSetName, sts.Status.ReadyReplicas, *sts.Spec.Replicas)
}
}
}
return fmt.Errorf("StatefulSet %s still has unready pods within %v", statefulSetName, timeout)
}

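A typical call site for the WaitForStatefulSetReplicasReady helper added above would look like this sketch (the StatefulSet name, namespace, and intervals are illustrative):

package example

import (
	"time"

	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForWebReady polls every 10s, for up to 5 minutes, until the
// StatefulSet "web" in "default" reports ReadyReplicas == Spec.Replicas.
func waitForWebReady(c clientset.Interface) error {
	return framework.WaitForStatefulSetReplicasReady("web", "default", c, 10*time.Second, 5*time.Minute)
}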
// WaitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName)
@ -1204,6 +1242,10 @@ func hasRemainingContent(c clientset.Interface, clientPool dynamic.ClientPool, n
if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
continue
}
// skip unavailable servers
if apierrs.IsServiceUnavailable(err) {
continue
}
return false, err
}
unstructuredList, ok := obj.(*unstructured.UnstructuredList)
@ -1551,6 +1593,28 @@ func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labe
})
}

// WaitForPodNameUnschedulableInNamespace returns an error if it takes too long for the pod to become Pending
// and have condition Status equal to Unschedulable,
// if the pod Get api returns an error (IsNotFound or other), or if the pod failed with an unexpected reason.
// Typically called to test that the passed-in pod is Pending and Unschedulable.
func WaitForPodNameUnschedulableInNamespace(c clientset.Interface, podName, namespace string) error {
return WaitForPodCondition(c, namespace, podName, "Unschedulable", PodStartTimeout, func(pod *v1.Pod) (bool, error) {
// Only consider Failed pods. Successful pods will be deleted and detected in
// waitForPodCondition's Get call returning `IsNotFound`
if pod.Status.Phase == v1.PodPending {
for _, cond := range pod.Status.Conditions {
if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" {
return true, nil
}
}
}
if pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
return true, fmt.Errorf("Expected pod %q in namespace %q to be in phase Pending, but got phase: %v", podName, namespace, pod.Status.Phase)
}
return false, nil
})
}

// WaitForService waits until the service appears (exist == true), or disappears (exist == false)
func WaitForService(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
@ -1649,7 +1713,7 @@ func WaitForEndpoint(c clientset.Interface, ns, name string) error {
}

// Context for checking pods responses by issuing GETs to them (via the API
// proxy) and verifying that they answer with ther own pod name.
// proxy) and verifying that they answer with there own pod name.
type podProxyResponseChecker struct {
c clientset.Interface
ns string
@ -2130,8 +2194,8 @@ func (b kubectlBuilder) Exec() (string, error) {
if err != nil {
var rc int = 127
if ee, ok := err.(*exec.ExitError); ok {
Logf("rc: %d", rc)
rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus())
Logf("rc: %d", rc)
}
return "", uexec.CodeExitError{
Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err),
@ -2161,6 +2225,26 @@ func RunKubectlOrDieInput(data string, args ...string) string {
return NewKubectlCommand(args...).WithStdinData(data).ExecOrDie()
}

// runKubemciWithKubeconfig is a convenience wrapper over runKubemciCmd
func runKubemciWithKubeconfig(args ...string) (string, error) {
if TestContext.KubeConfig != "" {
args = append(args, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig)
}
return runKubemciCmd(args...)
}

// runKubemciCmd is a convenience wrapper over kubectlBuilder to run kubemci.
// It assumes that kubemci exists in PATH.
func runKubemciCmd(args ...string) (string, error) {
// kubemci is assumed to be in PATH.
kubemci := "kubemci"
b := new(kubectlBuilder)
args = append(args, "--gcp-project="+TestContext.CloudConfig.ProjectID)

b.cmd = exec.Command(kubemci, args...)
return b.Exec()
}

func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) {
stdout, err = cmd.StdoutPipe()
if err != nil {
@ -2687,26 +2771,19 @@ func RemoveAvoidPodsOffNode(c clientset.Interface, nodeName string) {
ExpectNoError(err)
}

func getScalerForKind(internalClientset internalclientset.Interface, kind schema.GroupKind) (kubectl.Scaler, error) {
return kubectl.ScalerFor(kind, internalClientset)
}

func ScaleResource(
clientset clientset.Interface,
internalClientset internalclientset.Interface,
scalesGetter scaleclient.ScalesGetter,
ns, name string,
size uint,
wait bool,
kind schema.GroupKind,
gr schema.GroupResource,
) error {
By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size))
scaler, err := getScalerForKind(internalClientset, kind)
if err != nil {
return err
}
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
if err = scaler.Scale(ns, name, size, nil, waitForScale, waitForReplicas); err != nil {
scaler := kubectl.ScalerFor(kind, internalClientset.Batch(), scalesGetter, gr)
if err := testutil.ScaleResourceWithRetries(scaler, ns, name, size); err != nil {
return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err)
}
if !wait {
@ -3130,10 +3207,10 @@ func WaitForPartialEvents(c clientset.Interface, ns string, objOrRef runtime.Obj
})
}

type updateDSFunc func(*extensions.DaemonSet)
type updateDSFunc func(*apps.DaemonSet)

func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *extensions.DaemonSet, err error) {
daemonsets := c.ExtensionsV1beta1().DaemonSets(namespace)
func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *apps.DaemonSet, err error) {
daemonsets := c.AppsV1().DaemonSets(namespace)
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if ds, err = daemonsets.Get(name, metav1.GetOptions{}); err != nil {
@ -3459,12 +3536,6 @@ func GetSigner(provider string) (ssh.Signer, error) {
}
// Otherwise revert to home dir
keyfile = "kube_aws_rsa"
case "vagrant":
keyfile = os.Getenv("VAGRANT_SSH_KEY")
if len(keyfile) != 0 {
return sshutil.MakePrivateKeySignerFromFile(keyfile)
}
return nil, fmt.Errorf("VAGRANT_SSH_KEY env variable should be provided")
case "local", "vsphere":
keyfile = os.Getenv("LOCAL_SSH_KEY") // maybe?
if len(keyfile) == 0 {
@ -3905,9 +3976,7 @@ func sshRestartMaster() error {
}
var command string
if ProviderIs("gce") {
// `kube-apiserver_kube-apiserver` matches the name of the apiserver
// container.
command = "sudo docker ps | grep kube-apiserver_kube-apiserver | cut -d ' ' -f 1 | xargs sudo docker kill"
command = "pidof kube-apiserver | xargs sudo kill"
} else {
command = "sudo /etc/init.d/kube-apiserver restart"
}
@ -3938,9 +4007,9 @@ func RestartControllerManager() error {
if ProviderIs("gce") && !MasterOSDistroIs("gci") {
return fmt.Errorf("unsupported master OS distro: %s", TestContext.MasterOSDistro)
}
cmd := "sudo docker ps | grep k8s_kube-controller-manager | cut -d ' ' -f 1 | xargs sudo docker kill"
cmd := "pidof kube-controller-manager | xargs sudo kill"
Logf("Restarting controller-manager via ssh, running: %v", cmd)
result, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("couldn't restart controller-manager: %v", err)
@ -3949,9 +4018,9 @@ func RestartControllerManager() error {
}

func WaitForControllerManagerUp() error {
cmd := "curl http://localhost:" + strconv.Itoa(ports.ControllerManagerPort) + "/healthz"
cmd := "curl http://localhost:" + strconv.Itoa(ports.InsecureKubeControllerManagerPort) + "/healthz"
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
result, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
}
@ -3965,9 +4034,9 @@ func WaitForControllerManagerUp() error {
// CheckForControllerManagerHealthy checks that the controller manager does not crash within "duration"
func CheckForControllerManagerHealthy(duration time.Duration) error {
var PID string
cmd := "sudo docker ps | grep k8s_kube-controller-manager | cut -d ' ' -f 1"
cmd := "pidof kube-controller-manager"
for start := time.Now(); time.Since(start) < duration; time.Sleep(5 * time.Second) {
result, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
if err != nil {
// We don't necessarily know that it crashed, pipe could just be broken
LogSSHResult(result)
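The repeated GetMasterHost()+":22" to net.JoinHostPort(GetMasterHost(), sshPort) change above is not cosmetic: plain concatenation produces an ambiguous address for IPv6 masters. A small sketch of the difference (addresses are illustrative):

package main

import (
	"fmt"
	"net"
)

func main() {
	// Naive concatenation is ambiguous for IPv6 literals:
	fmt.Println("2001:db8::1" + ":22") // 2001:db8::1:22

	// JoinHostPort brackets IPv6 hosts and leaves IPv4/hostnames alone:
	fmt.Println(net.JoinHostPort("2001:db8::1", "22")) // [2001:db8::1]:22
	fmt.Println(net.JoinHostPort("10.0.0.1", "22"))    // 10.0.0.1:22
}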
@ -4029,7 +4098,7 @@ func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) e
Logf("Cluster has reached the desired number of ready nodes %d", size)
return nil
}
Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numNodes, numNodes-numReady)
Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numReady, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for number of ready nodes to be %d", timeout, size)
}
@ -4153,42 +4222,6 @@ func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []st
return websocket.DialConfig(cfg)
}

// getIngressAddress returns the ips/hostnames associated with the Ingress.
func getIngressAddress(client clientset.Interface, ns, name string) ([]string, error) {
ing, err := client.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
addresses := []string{}
for _, a := range ing.Status.LoadBalancer.Ingress {
if a.IP != "" {
addresses = append(addresses, a.IP)
}
if a.Hostname != "" {
addresses = append(addresses, a.Hostname)
}
}
return addresses, nil
}

// WaitForIngressAddress waits for the Ingress to acquire an address.
func WaitForIngressAddress(c clientset.Interface, ns, ingName string, timeout time.Duration) (string, error) {
var address string
err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
ipOrNameList, err := getIngressAddress(c, ns, ingName)
if err != nil || len(ipOrNameList) == 0 {
Logf("Waiting for Ingress %v to acquire IP, error %v", ingName, err)
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
address = ipOrNameList[0]
return true, nil
})
return address, err
}

// Looks for the given string in the log of a specific pod container
func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
@ -4340,7 +4373,7 @@ func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error {
}

return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
service := gceCloud.GetComputeService()
service := gceCloud.ComputeServices().GA
list, err := service.ForwardingRules.List(project, region).Do()
if err != nil {
return false, err
@ -4810,6 +4843,22 @@ func (p *E2ETestNodePreparer) CleanupNodes() error {
return encounteredError
}

func GetClusterID(c clientset.Interface) (string, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{})
if err != nil || cm == nil {
return "", fmt.Errorf("error getting cluster ID: %v", err)
}
clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster]
providerID, providerIDExists := cm.Data[gcecloud.UIDProvider]
if !clusterIDExists {
return "", fmt.Errorf("cluster ID not set")
}
if providerIDExists {
return providerID, nil
}
return clusterID, nil
}

// CleanupGCEResources cleans up GCE Service Type=LoadBalancer resources with
// the given name. The name is usually the UUID of the Service prefixed with an
// alpha-numeric character ('a') to work around cloudprovider rules.
@ -4960,6 +5009,7 @@ func PollURL(route, host string, timeout time.Duration, interval time.Duration,
Logf("host %v path %v: %v unreachable", host, route, err)
return expectUnreachable, nil
}
Logf("host %v path %v: reached", host, route)
return !expectUnreachable, nil
})
if pollErr != nil {
@ -5059,8 +5109,9 @@ func DumpDebugInfo(c clientset.Interface, ns string) {
}
}

// TODO: Get rid of this duplicate function in favour of the one in test/utils.
func IsRetryableAPIError(err error) bool {
return apierrs.IsTimeout(err) || apierrs.IsServerTimeout(err) || apierrs.IsTooManyRequests(err) || apierrs.IsInternalError(err)
return apierrs.IsTimeout(err) || apierrs.IsServerTimeout(err) || apierrs.IsTooManyRequests(err) || utilnet.IsProbableEOF(err)
}

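IsRetryableAPIError above is meant to be consumed inside a poll condition: transient API errors keep the loop going, anything else fails fast. An illustrative (not framework-provided) wrapper:

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForPod polls until the named pod can be fetched, retrying only on
// errors the framework classifies as transient.
func waitForPod(c clientset.Interface, ns, name string) error {
	return wait.PollImmediate(2*time.Second, time.Minute, func() (bool, error) {
		if _, err := c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{}); err != nil {
			if framework.IsRetryableAPIError(err) {
				return false, nil // transient: keep polling
			}
			return false, err // permanent: surface immediately
		}
		return true, nil
	})
}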
// DsFromManifest reads a .json/yaml file and returns the daemonset in it.
@ -5125,3 +5176,35 @@ func waitForServerPreferredNamespacedResources(d discovery.DiscoveryInterface, t
}
return resources, nil
}

// WaitForPersistentVolumeClaimDeleted waits for a PersistentVolumeClaim to be removed from the system until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolumeClaim %s to be removed", timeout, pvcName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
_, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns)
return nil
}
Logf("Failed to get claim %q in namespace %q, retrying in %v. Error: %v", pvcName, ns, Poll, err)
}
}
return fmt.Errorf("PersistentVolumeClaim %s is not removed from the system within %v", pvcName, timeout)
}

func GetClusterZones(c clientset.Interface) (sets.String, error) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
}

// collect values of zone label from all nodes
zones := sets.NewString()
for _, node := range nodes.Items {
if zone, found := node.Labels[kubeletapis.LabelZoneFailureDomain]; found {
zones.Insert(zone)
}
}
return zones, nil
}
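GetClusterZones collapses the per-node failure-domain labels into a string set, which is what lets SkipUnlessMultizone reduce to a single Len() check. An illustrative caller:

package example

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// logZones fetches the distinct zones of the cluster and reports the count.
func logZones(c clientset.Interface) {
	zones, err := framework.GetClusterZones(c)
	if err != nil {
		framework.Failf("listing cluster zones: %v", err)
	}
	framework.Logf("cluster spans %d zone(s): %v", zones.Len(), zones.List())
}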
47
vendor/k8s.io/kubernetes/test/e2e/framework/volume_util.go
generated
vendored
47
vendor/k8s.io/kubernetes/test/e2e/framework/volume_util.go
generated
vendored
@ -48,21 +48,13 @@ import (
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)

// Current supported images for e2e volume testing to be assigned to VolumeTestConfig.serverImage
const (
NfsServerImage string = "gcr.io/google_containers/volume-nfs:0.8"
IscsiServerImage string = "gcr.io/google_containers/volume-iscsi:0.1"
GlusterfsServerImage string = "gcr.io/google_containers/volume-gluster:0.2"
CephServerImage string = "gcr.io/google_containers/volume-ceph:0.1"
RbdServerImage string = "gcr.io/google_containers/volume-rbd:0.1"
)

const (
Kb int64 = 1000
Mb int64 = 1000 * Kb
@ -72,6 +64,9 @@ const (
MiB int64 = 1024 * KiB
GiB int64 = 1024 * MiB
TiB int64 = 1024 * GiB

// Waiting period for volume server (Ceph, ...) to initialize itself.
VolumeServerPodStartupSleep = 20 * time.Second
)

// Configuration of one test. The test consists of:
@ -91,6 +86,7 @@ type VolumeTestConfig struct {
ServerArgs []string
// Volumes needed to be mounted to the server container from the host
// map <host (source) path> -> <container (dst.) path>
// if <host (source) path> is empty, mount a tmpfs emptydir
ServerVolumes map[string]string
// Wait for the pod to terminate successfully
// False indicates that the pod is long running
@ -114,10 +110,11 @@ type VolumeTest struct {
// NFS-specific wrapper for CreateStorageServer.
func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
config = VolumeTestConfig{
Namespace: namespace,
Prefix: "nfs",
ServerImage: NfsServerImage,
ServerPorts: []int{2049},
Namespace: namespace,
Prefix: "nfs",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeNFSServer),
ServerPorts: []int{2049},
ServerVolumes: map[string]string{"": "/exports"},
}
if len(args) > 0 {
config.ServerArgs = args
@ -131,7 +128,7 @@ func NewGlusterfsServer(cs clientset.Interface, namespace string) (config Volume
config = VolumeTestConfig{
Namespace: namespace,
Prefix: "gluster",
ServerImage: GlusterfsServerImage,
ServerImage: imageutils.GetE2EImage(imageutils.VolumeGlusterServer),
ServerPorts: []int{24007, 24008, 49152},
}
pod, ip = CreateStorageServer(cs, config)
@ -173,7 +170,7 @@ func NewISCSIServer(cs clientset.Interface, namespace string) (config VolumeTest
config = VolumeTestConfig{
Namespace: namespace,
Prefix: "iscsi",
ServerImage: IscsiServerImage,
ServerImage: imageutils.GetE2EImage(imageutils.VolumeISCSIServer),
ServerPorts: []int{3260},
ServerVolumes: map[string]string{
// iSCSI container needs to insert modules from the host
@ -189,13 +186,21 @@ func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestCo
config = VolumeTestConfig{
Namespace: namespace,
Prefix: "rbd",
ServerImage: RbdServerImage,
ServerImage: imageutils.GetE2EImage(imageutils.VolumeRBDServer),
ServerPorts: []int{6789},
ServerVolumes: map[string]string{
"/lib/modules": "/lib/modules",
},
}
pod, ip = CreateStorageServer(cs, config)

// Ceph server container needs some time to start. Tests continue working if
// this sleep is removed, however kubelet logs (and kubectl describe
// <client pod>) would be cluttered with error messages about non-existing
// image.
Logf("sleeping a bit to give ceph server time to initialize")
time.Sleep(VolumeServerPodStartupSleep)

return config, pod, ip
}

@ -238,8 +243,12 @@ func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.
for src, dst := range config.ServerVolumes {
mountName := fmt.Sprintf("path%d", i)
volumes[i].Name = mountName
volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
Path: src,
if src == "" {
volumes[i].VolumeSource.EmptyDir = &v1.EmptyDirVolumeSource{}
} else {
volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
Path: src,
}
}

mounts[i].Name = mountName
@ -432,7 +441,7 @@ func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGro
if fsGroup != nil {
By("Checking fsGroup is correct.")
_, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"ls", "-ld", "/opt/0"}, strconv.Itoa(int(*fsGroup)), time.Minute)
Expect(err).NotTo(HaveOccurred(), "failed: getting the right priviliges in the file %v", int(*fsGroup))
Expect(err).NotTo(HaveOccurred(), "failed: getting the right privileges in the file %v", int(*fsGroup))
}
}

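With the StartVolumeServer change above, an empty source key in ServerVolumes now yields a tmpfs emptyDir rather than a hostPath. An illustrative config exercising both forms:

package example

import "k8s.io/kubernetes/test/e2e/framework"

// exampleConfig shows the new ServerVolumes convention: an empty source
// becomes an emptyDir mount, a non-empty source stays a hostPath mount.
func exampleConfig(namespace string) framework.VolumeTestConfig {
	return framework.VolumeTestConfig{
		Namespace: namespace,
		Prefix:    "nfs",
		ServerVolumes: map[string]string{
			"":             "/exports",     // emptyDir mounted at /exports
			"/lib/modules": "/lib/modules", // hostPath passthrough
		},
	}
}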
2
vendor/k8s.io/kubernetes/test/e2e/generated/gobindata_util.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/generated/gobindata_util.go
generated
vendored
@ -28,7 +28,7 @@ func ReadOrDie(filePath string) []byte {

fileBytes, err := Asset(filePath)
if err != nil {
gobindataMsg := "An error occured, possibly gobindata doesn't know about the file you're opening. For questions on maintaining gobindata, contact the sig-testing group."
gobindataMsg := "An error occurred, possibly gobindata doesn't know about the file you're opening. For questions on maintaining gobindata, contact the sig-testing group."
glog.Infof("Available gobindata files: %v ", AssetNames())
glog.Fatalf("Failed opening %v , with error %v. %v.", filePath, err, gobindataMsg)
}
6
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/elasticsearch/kibana.go
generated
vendored
6
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/elasticsearch/kibana.go
generated
vendored
@ -57,7 +57,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {

// Check for the existence of the Kibana service.
ginkgo.By("Checking the Kibana service exists.")
s := f.ClientSet.Core().Services(metav1.NamespaceSystem)
s := f.ClientSet.CoreV1().Services(metav1.NamespaceSystem)
// Make a few attempts to connect. This makes the test robust against
// being run as the first e2e test just after the e2e cluster has been created.
err := wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
@ -73,7 +73,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
ginkgo.By("Checking to make sure the Kibana pods are running")
label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue}))
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := f.ClientSet.Core().Pods(metav1.NamespaceSystem).List(options)
pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, pod := range pods.Items {
err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod)
@ -82,7 +82,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {

ginkgo.By("Checking to make sure we get a response from the Kibana UI.")
err = wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
req, err := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
req, err := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
if err != nil {
framework.Logf("Failed to get services proxy request: %v", err)
return false, nil
10
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/elasticsearch/utils.go
generated
vendored
10
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/elasticsearch/utils.go
generated
vendored
@ -55,7 +55,7 @@ func (p *esLogProvider) Init() error {
f := p.Framework
// Check for the existence of the Elasticsearch service.
framework.Logf("Checking the Elasticsearch service exists.")
s := f.ClientSet.Core().Services(api.NamespaceSystem)
s := f.ClientSet.CoreV1().Services(api.NamespaceSystem)
// Make a few attempts to connect. This makes the test robust against
// being run as the first e2e test just after the e2e cluster has been created.
var err error
@ -73,7 +73,7 @@ func (p *esLogProvider) Init() error {
framework.Logf("Checking to make sure the Elasticsearch pods are running")
labelSelector := fields.SelectorFromSet(fields.Set(map[string]string{"k8s-app": "elasticsearch-logging"})).String()
options := meta_v1.ListOptions{LabelSelector: labelSelector}
pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
pods, err := f.ClientSet.CoreV1().Pods(api.NamespaceSystem).List(options)
if err != nil {
return err
}
@ -90,7 +90,7 @@ func (p *esLogProvider) Init() error {
err = nil
var body []byte
for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
if errProxy != nil {
framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
continue
@ -124,7 +124,7 @@ func (p *esLogProvider) Init() error {
framework.Logf("Checking health of Elasticsearch service.")
healthy := false
for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
if errProxy != nil {
framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
continue
@ -172,7 +172,7 @@ func (p *esLogProvider) Cleanup() {
func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
f := p.Framework

proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
if errProxy != nil {
framework.Logf("Failed to get services proxy request: %v", errProxy)
return nil
2
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/generic_soak.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/generic_soak.go
generated
vendored
@ -119,7 +119,7 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname
framework.PodStateVerification{
Selectors: podlables,
ValidPhases: []v1.PodPhase{v1.PodRunning, v1.PodSucceeded},
// we don't validate total log data, since there is no gaurantee all logs will be stored forever.
// we don't validate total log data, since there is no guarantee all logs will be stored forever.
// instead, we just validate that some logs are being created in std out.
Verify: func(p v1.Pod) (bool, error) {
s, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second)
21
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/stackdrvier/basic.go
generated
vendored
21
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/stackdrvier/basic.go
generated
vendored
@ -18,7 +18,6 @@ package stackdriver

import (
"fmt"
"strings"
"time"

"k8s.io/apimachinery/pkg/util/wait"
@ -107,19 +106,14 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
err = utils.WaitForLogs(c, ingestionInterval, ingestionTimeout)
framework.ExpectNoError(err)
})
})
})

ginkgo.It("should ingest logs [Feature:StackdriverLogging]", func() {
withLogProviderForScope(f, podsScope, func(p *sdLogProvider) {
ginkgo.By("Checking that too long lines are trimmed", func() {
originalLength := 100001
maxLength := 100 * 1024
cmd := []string{
"/bin/sh",
"-c",
fmt.Sprintf("while :; do printf '%%*s' %d | tr ' ' 'A'; echo; sleep 60; done", originalLength),
fmt.Sprintf("while :; do printf '%%*s' %d | tr ' ' 'A'; echo; sleep 60; done", maxLength+1),
}
trimPrefix := "[Trimmed]"

pod, err := utils.StartAndReturnSelf(utils.NewExecLoggingPod("synthlogger-4", cmd), f)
framework.ExpectNoError(err, "Failed to start a pod")
@ -133,11 +127,8 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
if log.JSONPayload != nil {
return false, fmt.Errorf("got json log entry %v, wanted plain text", log.JSONPayload)
}
if len(log.TextPayload) == originalLength {
return false, fmt.Errorf("got non-trimmed entry of length %d", len(log.TextPayload))
}
if !strings.HasPrefix(log.TextPayload, trimPrefix) {
return false, fmt.Errorf("got message without prefix '%s': %s", trimPrefix, log.TextPayload)
if len(log.TextPayload) > maxLength {
return false, fmt.Errorf("got too long entry of length %d", len(log.TextPayload))
}
return true, nil
}, utils.JustTimeout, pod.Name())
@ -187,9 +178,9 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
framework.ExpectNoError(err)
})

ginkgo.By("Waiting for some docker logs to be ingested from each node", func() {
ginkgo.By("Waiting for some container runtime logs to be ingested from each node", func() {
nodeIds := utils.GetNodeIds(f.ClientSet)
log := fmt.Sprintf("projects/%s/logs/docker", framework.TestContext.CloudConfig.ProjectID)
log := fmt.Sprintf("projects/%s/logs/container-runtime", framework.TestContext.CloudConfig.ProjectID)
c := utils.NewLogChecker(p, utils.UntilFirstEntryFromLog(log), utils.JustTimeout, nodeIds...)
err := utils.WaitForLogs(c, ingestionInterval, ingestionTimeout)
framework.ExpectNoError(err)
91
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/stackdrvier/utils.go
generated
vendored
91
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/stackdrvier/utils.go
generated
vendored
@ -20,6 +20,7 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
"sync"
"time"

"k8s.io/apimachinery/pkg/util/wait"
@ -45,6 +46,9 @@ const (

// PubSub topic with log entries polling interval
sdLoggingPollInterval = 100 * time.Millisecond

// The parallelism level of polling logs process.
sdLoggingPollParallelism = 10
)

type logProviderScope int
@ -68,6 +72,7 @@ type sdLogProvider struct {
logSink *sd.LogSink

pollingStopChannel chan struct{}
pollingWG *sync.WaitGroup

queueCollection utils.LogsQueueCollection

@ -92,7 +97,8 @@ func newSdLogProvider(f *framework.Framework, scope logProviderScope) (*sdLogPro
sdService: sdService,
pubsubService: pubsubService,
framework: f,
pollingStopChannel: make(chan struct{}, 1),
pollingStopChannel: make(chan struct{}),
pollingWG: &sync.WaitGroup{},
queueCollection: utils.NewLogsQueueCollection(maxQueueSize),
}
return provider, nil
@ -128,13 +134,14 @@ func (p *sdLogProvider) Init() error {
return fmt.Errorf("failed to wait for sink to become operational: %v", err)
}

go p.pollLogs()
p.startPollingLogs()

return nil
}

func (p *sdLogProvider) Cleanup() {
p.pollingStopChannel <- struct{}{}
close(p.pollingStopChannel)
p.pollingWG.Wait()

if p.logSink != nil {
projectID := framework.TestContext.CloudConfig.ProjectID
@ -257,44 +264,54 @@ func (p *sdLogProvider) waitSinkInit() error {
})
}

func (p *sdLogProvider) pollLogs() {
wait.PollUntil(sdLoggingPollInterval, func() (bool, error) {
messages, err := pullAndAck(p.pubsubService, p.subscription)
func (p *sdLogProvider) startPollingLogs() {
for i := 0; i < sdLoggingPollParallelism; i++ {
p.pollingWG.Add(1)
go func() {
defer p.pollingWG.Done()

wait.PollUntil(sdLoggingPollInterval, func() (bool, error) {
p.pollLogsOnce()
return false, nil
}, p.pollingStopChannel)
}()
}
}

func (p *sdLogProvider) pollLogsOnce() {
messages, err := pullAndAck(p.pubsubService, p.subscription)
if err != nil {
framework.Logf("Failed to pull messages from PubSub due to %v", err)
return
}

for _, msg := range messages {
logEntryEncoded, err := base64.StdEncoding.DecodeString(msg.Message.Data)
if err != nil {
framework.Logf("Failed to pull messages from PubSub due to %v", err)
return false, nil
framework.Logf("Got a message from pubsub that is not base64-encoded: %s", msg.Message.Data)
continue
}

for _, msg := range messages {
logEntryEncoded, err := base64.StdEncoding.DecodeString(msg.Message.Data)
if err != nil {
framework.Logf("Got a message from pubsub that is not base64-encoded: %s", msg.Message.Data)
continue
}

var sdLogEntry sd.LogEntry
if err := json.Unmarshal(logEntryEncoded, &sdLogEntry); err != nil {
framework.Logf("Failed to decode a pubsub message '%s': %v", logEntryEncoded, err)
continue
}

name, ok := p.tryGetName(sdLogEntry)
if !ok {
framework.Logf("Received LogEntry with unexpected resource type: %s", sdLogEntry.Resource.Type)
continue
}

logEntry, err := convertLogEntry(sdLogEntry)
if err != nil {
framework.Logf("Failed to parse Stackdriver LogEntry: %v", err)
continue
}

p.queueCollection.Push(name, logEntry)
var sdLogEntry sd.LogEntry
if err := json.Unmarshal(logEntryEncoded, &sdLogEntry); err != nil {
framework.Logf("Failed to decode a pubsub message '%s': %v", logEntryEncoded, err)
continue
}

return false, nil
}, p.pollingStopChannel)
name, ok := p.tryGetName(sdLogEntry)
if !ok {
framework.Logf("Received LogEntry with unexpected resource type: %s", sdLogEntry.Resource.Type)
continue
}

logEntry, err := convertLogEntry(sdLogEntry)
if err != nil {
framework.Logf("Failed to parse Stackdriver LogEntry: %v", err)
continue
}

p.queueCollection.Push(name, logEntry)
}
}

func (p *sdLogProvider) tryGetName(sdLogEntry sd.LogEntry) (string, bool) {
@ -311,6 +328,8 @@ func (p *sdLogProvider) tryGetName(sdLogEntry sd.LogEntry) (string, bool) {

func convertLogEntry(sdLogEntry sd.LogEntry) (entry utils.LogEntry, err error) {
entry = utils.LogEntry{LogName: sdLogEntry.LogName}
entry.Location = sdLogEntry.Resource.Labels["location"]

if sdLogEntry.TextPayload != "" {
entry.TextPayload = sdLogEntry.TextPayload
return
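The rewritten poller above fans out sdLoggingPollParallelism goroutines, stops all of them by closing one channel (the previous one-element buffered send could only reach a single receiver), and joins them through the WaitGroup. The shape in isolation, with the poll body reduced to a callback:

package example

import (
	"sync"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// startWorkers launches n pollers that invoke fn until stop is closed.
// close(stop) is observed by every goroutine, and wg.Wait() joins them,
// mirroring the Cleanup path above.
func startWorkers(n int, fn func(), stop chan struct{}, wg *sync.WaitGroup) {
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			wait.PollUntil(100*time.Millisecond, func() (bool, error) {
				fn()
				return false, nil // never "done"; runs until stop closes
			}, stop)
		}()
	}
}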
1
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/utils/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/utils/BUILD
generated
vendored
@ -19,6 +19,7 @@ go_library(
deps = [
"//pkg/apis/core:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
4
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/utils/logging_pod.go
generated
vendored
4
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/utils/logging_pod.go
generated
vendored
@ -21,10 +21,12 @@ import (
"time"

"fmt"

api_v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)

const (
@ -101,7 +103,7 @@ func (p *loadLoggingPod) Start(f *framework.Framework) error {
Containers: []api_v1.Container{
{
Name: loggingContainerName,
Image: "gcr.io/google_containers/logs-generator:v0.1.0",
Image: imageutils.GetE2EImage(imageutils.LogsGenerator),
Env: []api_v1.EnvVar{
{
Name: "LOGS_GENERATOR_LINES_TOTAL",
1
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/utils/types.go
generated
vendored
1
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/utils/types.go
generated
vendored
@ -32,6 +32,7 @@ var (
type LogEntry struct {
LogName string
TextPayload string
Location string
JSONPayload map[string]interface{}
}

3
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/utils/wait.go
generated
vendored
3
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/utils/wait.go
generated
vendored
@ -48,6 +48,9 @@ func UntilFirstEntryFromLog(log string) IngestionPred {
return func(_ string, entries []LogEntry) (bool, error) {
for _, e := range entries {
if e.LogName == log {
if e.Location != framework.TestContext.CloudConfig.Zone {
return false, fmt.Errorf("Bad location in logs '%s' != '%d'", e.Location, framework.TestContext.CloudConfig.Zone)
}
return true, nil
}
}
6
vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/BUILD
generated
vendored
6
vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/BUILD
generated
vendored
@ -8,12 +8,14 @@ load(
go_library(
name = "go_default_library",
srcs = [
"accelerator.go",
"cadvisor.go",
"custom_metrics_deployments.go",
"custom_metrics_stackdriver.go",
"influxdb.go",
"metrics_grabber.go",
"stackdriver.go",
"stackdriver_metadata_agent.go",
],
importpath = "k8s.io/kubernetes/test/e2e/instrumentation/monitoring",
deps = [
@ -21,6 +23,8 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/influxdata/influxdb/client/v2:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
@ -29,6 +33,7 @@ go_library(
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
@ -36,7 +41,6 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/custom_metrics:go_default_library",
],
)
134
vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/accelerator.go
generated
vendored
Normal file
134
vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/accelerator.go
generated
vendored
Normal file
@ -0,0 +1,134 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package monitoring

import (
"context"
"os"
"time"

. "github.com/onsi/ginkgo"
"golang.org/x/oauth2/google"
gcm "google.golang.org/api/monitoring/v3"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
"k8s.io/kubernetes/test/e2e/scheduling"
"k8s.io/kubernetes/test/utils/image"
)

// Stackdriver container accelerator metrics, as described here:
// https://cloud.google.com/monitoring/api/metrics_gcp#gcp-container
var acceleratorMetrics = []string{
"accelerator/duty_cycle",
"accelerator/memory_total",
"accelerator/memory_used",
}

var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
})

f := framework.NewDefaultFramework("stackdriver-monitoring")

It("should have accelerator metrics [Feature:StackdriverAcceleratorMonitoring]", func() {
testStackdriverAcceleratorMonitoring(f)
})

})

func testStackdriverAcceleratorMonitoring(f *framework.Framework) {
projectId := framework.TestContext.CloudConfig.ProjectID

ctx := context.Background()
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)

gcmService, err := gcm.New(client)

framework.ExpectNoError(err)

// set this env var if accessing Stackdriver test endpoint (default is prod):
// $ export STACKDRIVER_API_ENDPOINT_OVERRIDE=https://test-monitoring.sandbox.googleapis.com/
basePathOverride := os.Getenv("STACKDRIVER_API_ENDPOINT_OVERRIDE")
if basePathOverride != "" {
gcmService.BasePath = basePathOverride
}

scheduling.SetupNVIDIAGPUNode(f, false)

// TODO: remove this after cAdvisor race is fixed.
time.Sleep(time.Minute)

f.PodClient().Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: rcName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Name: rcName,
Image: image.GetE2EImage(image.CudaVectorAdd),
Command: []string{"/bin/sh", "-c"},
Args: []string{"nvidia-smi && sleep infinity"},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
framework.NVIDIAGPUResourceName: *resource.NewQuantity(1, resource.DecimalSI),
},
},
},
},
},
})

metricsMap := map[string]bool{}
pollingFunction := checkForAcceleratorMetrics(projectId, gcmService, time.Now(), metricsMap)
err = wait.Poll(pollFrequency, pollTimeout, pollingFunction)
if err != nil {
framework.Logf("Missing metrics: %+v", metricsMap)
}
framework.ExpectNoError(err)
}

func checkForAcceleratorMetrics(projectId string, gcmService *gcm.Service, start time.Time, metricsMap map[string]bool) func() (bool, error) {
return func() (bool, error) {
counter := 0
for _, metric := range acceleratorMetrics {
metricsMap[metric] = false
}
for _, metric := range acceleratorMetrics {
// TODO: check only for metrics from this cluster
ts, err := fetchTimeSeries(projectId, gcmService, metric, start, time.Now())
framework.ExpectNoError(err)
if len(ts) > 0 {
counter = counter + 1
metricsMap[metric] = true
framework.Logf("Received %v timeseries for metric %v", len(ts), metric)
} else {
framework.Logf("No timeseries for metric %v", metric)
}
}
if counter < 3 {
return false, nil
}
return true, nil
}
}
2
vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/cadvisor.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/cadvisor.go
generated
vendored
@ -68,7 +68,7 @@ func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration)
for _, node := range nodeList.Items {
// cadvisor is not accessible directly unless its port (4194 by default) is exposed.
// Here, we access '/stats/' REST endpoint on the kubelet which polls cadvisor internally.
statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
statsResource := fmt.Sprintf("api/v1/nodes/%s/proxy/stats/", node.Name)
By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource))
_, err = c.CoreV1().RESTClient().Get().AbsPath(statsResource).Timeout(timeout).Do().Raw()
if err != nil {
120
vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go
generated
vendored
120
vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go
generated
vendored
@ -18,6 +18,7 @@ package monitoring
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
gcm "google.golang.org/api/monitoring/v3"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
@ -27,10 +28,11 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
CustomMetricName = "foo-metric"
|
||||
UnusedMetricName = "unused-metric"
|
||||
CustomMetricValue = int64(448)
|
||||
UnusedMetricValue = int64(446)
|
||||
CustomMetricName = "foo"
|
||||
UnusedMetricName = "unused"
|
||||
CustomMetricValue = int64(448)
|
||||
UnusedMetricValue = int64(446)
|
||||
StackdriverExporter = "stackdriver-exporter"
|
||||
// HPAPermissions is a ClusterRoleBinding that grants unauthenticated user permissions granted for
|
||||
// HPA for testing purposes, i.e. it should grant permission to read custom metrics.
|
||||
HPAPermissions = &rbac.ClusterRoleBinding{
|
||||
@ -52,9 +54,37 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
// StackdriverExporterDeployment is a Deployment of simple application that exports a metric of
|
||||
// CustomMetricContainerSpec allows to specify a config for StackdriverExporterDeployment
|
||||
// with multiple containers exporting different metrics.
|
||||
type CustomMetricContainerSpec struct {
|
||||
Name string
|
||||
MetricName string
|
||||
MetricValue int64
|
||||
}
|
||||
|
||||
// SimpleStackdriverExporterDeployment is a Deployment of simple application that exports a metric of
|
||||
// fixed value to Stackdriver in a loop.
|
||||
func StackdriverExporterDeployment(name, namespace string, replicas int32, metricValue int64) *extensions.Deployment {
|
||||
func SimpleStackdriverExporterDeployment(name, namespace string, replicas int32, metricValue int64) *extensions.Deployment {
|
||||
return StackdriverExporterDeployment(name, namespace, replicas,
|
||||
[]CustomMetricContainerSpec{
|
||||
{
|
||||
Name: StackdriverExporter,
|
||||
MetricName: CustomMetricName,
|
||||
MetricValue: metricValue,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// StackdriverExporterDeployment is a Deployment of an application that can expose
|
||||
// an arbitrary amount of metrics of fixed value to Stackdriver in a loop. Each metric
|
||||
// is exposed by a different container in one pod.
|
||||
// The metric names and values are configured via the containers parameter.
|
||||
func StackdriverExporterDeployment(name, namespace string, replicas int32, containers []CustomMetricContainerSpec) *extensions.Deployment {
|
||||
podSpec := corev1.PodSpec{Containers: []corev1.Container{}}
|
||||
for _, containerSpec := range containers {
|
||||
podSpec.Containers = append(podSpec.Containers, stackdriverExporterContainerSpec(containerSpec.Name, containerSpec.MetricName, containerSpec.MetricValue))
|
||||
}
|
||||
|
||||
return &extensions.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
@ -70,7 +100,7 @@ func StackdriverExporterDeployment(name, namespace string, replicas int32, metri
|
||||
"name": name,
|
||||
},
|
||||
},
|
||||
Spec: stackdriverExporterPodSpec(CustomMetricName, metricValue),
|
||||
Spec: podSpec,
|
||||
},
|
||||
Replicas: &replicas,
|
||||
},
|
||||
@ -88,18 +118,75 @@ func StackdriverExporterPod(podName, namespace, podLabel, metricName string, met
|
||||
"name": podLabel,
|
||||
},
|
||||
},
|
||||
Spec: stackdriverExporterPodSpec(metricName, metricValue),
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{stackdriverExporterContainerSpec(StackdriverExporter, metricName, metricValue)},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func stackdriverExporterPodSpec(metricName string, metricValue int64) corev1.PodSpec {
|
||||
func stackdriverExporterContainerSpec(name string, metricName string, metricValue int64) corev1.Container {
|
||||
return corev1.Container{
|
||||
Name: name,
|
||||
Image: "k8s.gcr.io/sd-dummy-exporter:v0.1.0",
|
||||
ImagePullPolicy: corev1.PullPolicy("Always"),
|
||||
Command: []string{"/sd_dummy_exporter", "--pod-id=$(POD_ID)", "--metric-name=" + metricName, fmt.Sprintf("--metric-value=%v", metricValue)},
|
||||
Env: []corev1.EnvVar{
|
||||
{
|
||||
Name: "POD_ID",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
FieldRef: &corev1.ObjectFieldSelector{
|
||||
FieldPath: "metadata.uid",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Ports: []corev1.ContainerPort{{ContainerPort: 80}},
|
||||
}
|
||||
}
// PrometheusExporterDeployment is a Deployment of a simple application with two containers:
// one exposing a metric in Prometheus format, and a second, prometheus-to-sd, container
// that scrapes the metric and pushes it to Stackdriver.
func PrometheusExporterDeployment(name, namespace string, replicas int32, metricValue int64) *extensions.Deployment {
return &extensions.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: extensions.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"name": name},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"name": name,
},
},
Spec: prometheusExporterPodSpec(CustomMetricName, metricValue, 8080),
},
Replicas: &replicas,
},
}
}

func prometheusExporterPodSpec(metricName string, metricValue int64, port int32) corev1.PodSpec {
return corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "stackdriver-exporter",
Image: "gcr.io/google-containers/sd-dummy-exporter:v0.1.0",
Name: "prometheus-exporter",
Image: "k8s.gcr.io/prometheus-dummy-exporter:v0.1.0",
ImagePullPolicy: corev1.PullPolicy("Always"),
Command: []string{"/sd_dummy_exporter", "--pod-id=$(POD_ID)", "--metric-name=" + metricName, fmt.Sprintf("--metric-value=%v", metricValue)},
Command: []string{"/prometheus_dummy_exporter", "--metric-name=" + metricName,
fmt.Sprintf("--metric-value=%v", metricValue), fmt.Sprintf("--port=%d", port)},
Ports: []corev1.ContainerPort{{ContainerPort: port}},
},
{
Name: "prometheus-to-sd",
Image: "k8s.gcr.io/prometheus-to-sd:v0.2.3",
ImagePullPolicy: corev1.PullPolicy("Always"),
Command: []string{"/monitor", fmt.Sprintf("--source=:http://localhost:%d", port),
"--stackdriver-prefix=custom.googleapis.com", "--pod-id=$(POD_ID)", "--namespace-id=$(POD_NAMESPACE)"},
Env: []corev1.EnvVar{
{
Name: "POD_ID",
@ -109,8 +196,15 @@ func stackdriverExporterPodSpec(metricName string, metricValue int64) corev1.Pod
},
},
},
{
Name: "POD_NAMESPACE",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
},
Ports: []corev1.ContainerPort{{ContainerPort: 80}},
},
},
}
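
Example (editorial, not part of the vendored diff): the Prometheus variant wires a two-container sidecar pattern; a minimal usage sketch, under the same import-path assumption as above:

package main

import (
"fmt"

monitoring "k8s.io/kubernetes/test/e2e/instrumentation/monitoring" // assumed path
)

func main() {
d := monitoring.PrometheusExporterDeployment("prom-exporter", "default", 1, 42)
// The pod template should carry two containers: "prometheus-exporter",
// which serves the metric on :8080 in Prometheus format, and
// "prometheus-to-sd", which scrapes localhost:8080 and pushes the metric
// to Stackdriver under the custom.googleapis.com prefix.
for _, c := range d.Spec.Template.Spec.Containers {
fmt.Println(c.Name)
}
}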
@ -32,7 +32,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/client-go/discovery"
kubeaggrcs "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"
customclient "k8s.io/metrics/pkg/client/custom_metrics"
)
@ -50,13 +49,11 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {

f := framework.NewDefaultFramework("stackdriver-monitoring")
var kubeClient clientset.Interface
var kubeAggrClient kubeaggrcs.Interface
var customMetricsClient customclient.CustomMetricsClient
var discoveryClient *discovery.DiscoveryClient

It("should run Custom Metrics - Stackdriver Adapter [Feature:StackdriverCustomMetrics]", func() {
kubeClient = f.ClientSet
kubeAggrClient = f.AggregatorClient
config, err := framework.LoadConfig()
if err != nil {
framework.Failf("Failed to load config: %s", err)
@ -104,8 +101,8 @@ func testAdapter(f *framework.Framework, kubeClient clientset.Interface, customM
}
defer CleanupAdapter()

_, err = kubeClient.Rbac().ClusterRoleBindings().Create(HPAPermissions)
defer kubeClient.Rbac().ClusterRoleBindings().Delete("custom-metrics-reader", &metav1.DeleteOptions{})
_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions)
defer kubeClient.RbacV1().ClusterRoleBindings().Delete("custom-metrics-reader", &metav1.DeleteOptions{})

// Run application that exports the metric
err = createSDExporterPods(f, kubeClient)
2
vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/influxdb.go
generated
vendored
@ -122,7 +122,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string,
if err != nil {
return nil, err
}
psList, err := c.AppsV1beta1().StatefulSets(metav1.NamespaceSystem).List(options)
psList, err := c.AppsV1().StatefulSets(metav1.NamespaceSystem).List(options)
if err != nil {
return nil, err
}
169
vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go
generated
vendored
Normal file
@ -0,0 +1,169 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package monitoring

import (
"time"

"golang.org/x/oauth2/google"
clientset "k8s.io/client-go/kubernetes"

"context"
"encoding/json"
"fmt"
. "github.com/onsi/ginkgo"
"io/ioutil"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
"reflect"
)

const (
// Time to wait after a pod creation for its metadata to be exported
metadataWaitTime = 120 * time.Second

// Scope for Stackdriver Metadata API
MonitoringScope = "https://www.googleapis.com/auth/monitoring"
)

var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
})

f := framework.NewDefaultFramework("stackdriver-monitoring")
var kubeClient clientset.Interface

It("should run Stackdriver Metadata Agent [Feature:StackdriverMetadataAgent]", func() {
kubeClient = f.ClientSet
testAgent(f, kubeClient)
})
})

func testAgent(f *framework.Framework, kubeClient clientset.Interface) {
projectId := framework.TestContext.CloudConfig.ProjectID
resourceType := "k8s_container"
uniqueContainerName := fmt.Sprintf("test-container-%v", time.Now().Unix())
endpoint := fmt.Sprintf(
"https://stackdriver.googleapis.com/v1beta2/projects/%v/resourceMetadata?filter=resource.type%%3D%v+AND+resource.label.container_name%%3D%v",
projectId,
resourceType,
uniqueContainerName)

oauthClient, err := google.DefaultClient(context.Background(), MonitoringScope)
if err != nil {
framework.Failf("Failed to create oauth client: %s", err)
}

// Create test pod with unique name.
framework.CreateExecPodOrFail(kubeClient, f.Namespace.Name, uniqueContainerName, func(pod *v1.Pod) {
pod.Spec.Containers[0].Name = uniqueContainerName
})
defer kubeClient.CoreV1().Pods(f.Namespace.Name).Delete(uniqueContainerName, &metav1.DeleteOptions{})

// Wait a short amount of time for the Metadata Agent to be created and the metadata to be exported
time.Sleep(metadataWaitTime)

resp, err := oauthClient.Get(endpoint)
if err != nil {
framework.Failf("Failed to call Stackdriver Metadata API: %s", err)
}
if resp.StatusCode != 200 {
framework.Failf("Stackdriver Metadata API returned error status: %s", resp.Status)
}
metadataAPIResponse, err := ioutil.ReadAll(resp.Body)
if err != nil {
framework.Failf("Failed to read response from Stackdriver Metadata API: %s", err)
}

exists, err := verifyPodExists(metadataAPIResponse, uniqueContainerName)
if err != nil {
framework.Failf("Failed to process response from Stackdriver Metadata API: %s", err)
}
if !exists {
framework.Failf("Missing Metadata for container %q", uniqueContainerName)
}
}

type Metadata struct {
Results []map[string]interface{}
}

type Resource struct {
resourceType string
resourceLabels map[string]string
}

func verifyPodExists(response []byte, containerName string) (bool, error) {
var metadata Metadata
err := json.Unmarshal(response, &metadata)
if err != nil {
return false, fmt.Errorf("Failed to unmarshal: %s", err)
}

for _, result := range metadata.Results {
rawResource, ok := result["resource"]
if !ok {
return false, fmt.Errorf("No resource entry in response from Stackdriver Metadata API")
}
resource, err := parseResource(rawResource)
if err != nil {
return false, fmt.Errorf("No 'resource' label: %s", err)
}
if resource.resourceType == "k8s_container" &&
resource.resourceLabels["container_name"] == containerName {
return true, nil
}
}
return false, nil
}

func parseResource(resource interface{}) (*Resource, error) {
var labels map[string]string = map[string]string{}
resourceMap, ok := resource.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("Resource entry is of type %s, expected map[string]interface{}", reflect.TypeOf(resource))
}
resourceType, ok := resourceMap["type"]
if !ok {
return nil, fmt.Errorf("Resource entry doesn't have a type specified")
}
resourceTypeName, ok := resourceType.(string)
if !ok {
return nil, fmt.Errorf("Resource type is of type %s, expected string", reflect.TypeOf(resourceType))
}
resourceLabels, ok := resourceMap["labels"]
if !ok {
return nil, fmt.Errorf("Resource entry doesn't have any labels specified")
}
resourceLabelMap, ok := resourceLabels.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("Resource labels entry is of type %s, expected map[string]interface{}", reflect.TypeOf(resourceLabels))
}
for label, val := range resourceLabelMap {
labels[label], ok = val.(string)
if !ok {
return nil, fmt.Errorf("Resource label %q is of type %s, expected string", label, reflect.TypeOf(val))
}
}
return &Resource{
resourceType: resourceTypeName,
resourceLabels: labels,
}, nil
}
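
Example (editorial, not part of the vendored diff): the parsing logic above walks a response of the shape below; this standalone sketch feeds a hand-written payload (illustrative only, not a verbatim Stackdriver response) through the same type assertions:

package main

import (
"encoding/json"
"fmt"
)

// metadata mirrors the Metadata struct used by the test.
type metadata struct {
Results []map[string]interface{}
}

func main() {
raw := []byte(`{"results":[{"resource":{"type":"k8s_container","labels":{"container_name":"test-container-1"}}}]}`)
var m metadata
if err := json.Unmarshal(raw, &m); err != nil {
panic(err)
}
// Same assertions parseResource performs: resource -> map, type -> string,
// labels -> map of strings.
res := m.Results[0]["resource"].(map[string]interface{})
labels := res["labels"].(map[string]interface{})
fmt.Println(res["type"], labels["container_name"]) // k8s_container test-container-1
}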
2
vendor/k8s.io/kubernetes/test/e2e/lifecycle/BUILD
generated
vendored
@ -12,7 +12,6 @@ go_library(
"cluster_upgrade.go",
"framework.go",
"ha_master.go",
"node_auto_repairs.go",
"reboot.go",
"resize_nodes.go",
"restart.go",
@ -32,7 +31,6 @@ go_library(
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//test/utils/junit:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/golang.org/x/crypto/ssh:go_default_library",

2
vendor/k8s.io/kubernetes/test/e2e/lifecycle/bootstrap/BUILD
generated
vendored
@ -14,7 +14,6 @@ go_library(
],
importpath = "k8s.io/kubernetes/test/e2e/lifecycle/bootstrap",
deps = [
"//pkg/bootstrap/api:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/lifecycle:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
@ -24,6 +23,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/bootstrap/token/api:go_default_library",
],
)

2
vendor/k8s.io/kubernetes/test/e2e/lifecycle/bootstrap/bootstrap_signer.go
generated
vendored
@ -22,7 +22,7 @@ import (

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/lifecycle"
)

2
vendor/k8s.io/kubernetes/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go
generated
vendored
@ -24,7 +24,7 @@ import (

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/lifecycle"
)

2
vendor/k8s.io/kubernetes/test/e2e/lifecycle/bootstrap/util.go
generated
vendored
@ -27,7 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
"k8s.io/kubernetes/test/e2e/framework"
)

55
vendor/k8s.io/kubernetes/test/e2e/lifecycle/cluster_upgrade.go
generated
vendored
@ -72,6 +72,11 @@ var kubeProxyDowngradeTests = []upgrades.Test{
&upgrades.IngressUpgradeTest{},
}

// Forcefully swap ingress image.
var ingressUpgradeTests = []upgrades.Test{
&upgrades.IngressUpgradeTest{},
}

var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
f := framework.NewDefaultFramework("cluster-upgrade")

@ -201,6 +206,56 @@ var _ = SIGDescribe("etcd Upgrade [Feature:EtcdUpgrade]", func() {
})
})

var _ = SIGDescribe("ingress Upgrade [Feature:IngressUpgrade]", func() {
f := framework.NewDefaultFramework("ingress-upgrade")

// Create the frameworks here because we can only create them
// in a "Describe".
testFrameworks := createUpgradeFrameworks(ingressUpgradeTests)
Describe("ingress upgrade", func() {
It("should maintain a functioning ingress", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), "")
framework.ExpectNoError(err)

testSuite := &junit.TestSuite{Name: "ingress upgrade"}
ingressTest := &junit.TestCase{Name: "[sig-networking] ingress-upgrade", Classname: "upgrade_tests"}
testSuite.TestCases = append(testSuite.TestCases, ingressTest)

upgradeFunc := func() {
start := time.Now()
defer finalizeUpgradeTest(start, ingressTest)
framework.ExpectNoError(framework.IngressUpgrade(true))
}
runUpgradeSuite(f, ingressUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.IngressUpgrade, upgradeFunc)
})
})
})

var _ = SIGDescribe("ingress Downgrade [Feature:IngressDowngrade]", func() {
f := framework.NewDefaultFramework("ingress-downgrade")

// Create the frameworks here because we can only create them
// in a "Describe".
testFrameworks := createUpgradeFrameworks(ingressUpgradeTests)
Describe("ingress downgrade", func() {
It("should maintain a functioning ingress", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), "")
framework.ExpectNoError(err)

testSuite := &junit.TestSuite{Name: "ingress downgrade"}
ingressTest := &junit.TestCase{Name: "[sig-networking] ingress-downgrade", Classname: "upgrade_tests"}
testSuite.TestCases = append(testSuite.TestCases, ingressTest)

upgradeFunc := func() {
start := time.Now()
defer finalizeUpgradeTest(start, ingressTest)
framework.ExpectNoError(framework.IngressUpgrade(false))
}
runUpgradeSuite(f, ingressUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.IngressUpgrade, upgradeFunc)
})
})
})

var _ = Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]", func() {
f := framework.NewDefaultFramework("stateful-upgrade")

166
vendor/k8s.io/kubernetes/test/e2e/lifecycle/node_auto_repairs.go
generated
vendored
@ -1,166 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package lifecycle

import (
"fmt"
"os/exec"
"strings"
"time"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"

"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)

const (
defaultTimeout = 3 * time.Minute
repairTimeout = 20 * time.Minute
)

var _ = SIGDescribe("Node Auto Repairs [Slow] [Disruptive]", func() {
f := framework.NewDefaultFramework("lifecycle")
var c clientset.Interface
var originalNodes map[string]string
var nodeCount int

BeforeEach(func() {
framework.SkipUnlessProviderIs("gke")
c = f.ClientSet
nodeCount = 0
originalNodes = make(map[string]string)
for _, groupName := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
glog.Infof("Processing group %s", groupName)
nodes, err := framework.GetGroupNodes(groupName)
framework.ExpectNoError(err)
for _, node := range nodes {
nodeReady, err := isNodeReady(c, node)
framework.ExpectNoError(err)
Expect(nodeReady).To(Equal(true))
originalNodes[groupName] = node
nodeCount++
}
}
glog.Infof("Number of nodes %d", nodeCount)
})

AfterEach(func() {
framework.SkipUnlessProviderIs("gke")
By(fmt.Sprintf("Restoring initial size of the cluster"))
for groupName, nodeName := range originalNodes {
nodeReady, err := isNodeReady(c, nodeName)
framework.ExpectNoError(err)
if !nodeReady {
framework.ExpectNoError(recreateNode(nodeName, groupName))
}
}
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, defaultTimeout))
})

It("should repair node [Feature:NodeAutoRepairs]", func() {
framework.SkipUnlessProviderIs("gke")
framework.ExpectNoError(enableAutoRepair("default-pool"))
defer disableAutoRepair("default-pool")
readyNodes := getReadyNodes(c)
Expect(len(readyNodes)).NotTo(Equal(0))
nodeName := readyNodes[0].Name
framework.ExpectNoError(stopKubeletOnNode(nodeName))
By("Wait till node is unready.")
Expect(framework.WaitForNodeToBeNotReady(c, nodeName, defaultTimeout)).To(Equal(true))
By("Wait till node is repaired.")
Expect(framework.WaitForNodeToBeReady(c, nodeName, repairTimeout)).To(Equal(true))
})
})

func execCmd(args ...string) *exec.Cmd {
glog.Infof("Executing: %s", strings.Join(args, " "))
return exec.Command(args[0], args[1:]...)
}

func getReadyNodes(c clientset.Interface) []v1.Node {
nodeList := framework.GetReadySchedulableNodesOrDie(c)
return nodeList.Items
}

func enableAutoRepair(nodePool string) error {
glog.Infof("Using gcloud to enable auto repair for pool %s", nodePool)
output, err := execCmd("gcloud", "beta", "container", "node-pools", "update", nodePool,
"--enable-autorepair",
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone,
"--cluster="+framework.TestContext.CloudConfig.Cluster).CombinedOutput()
if err != nil {
glog.Errorf("Failed to enable auto repair: %s", string(output))
return fmt.Errorf("failed to enable auto repair: %v", err)
}
return nil
}

func disableAutoRepair(nodePool string) error {
glog.Infof("Using gcloud to disable auto repair for pool %s", nodePool)
output, err := execCmd("gcloud", "beta", "container", "node-pools", "update", nodePool,
"--no-enable-autorepair",
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone,
"--cluster="+framework.TestContext.CloudConfig.Cluster).CombinedOutput()
if err != nil {
glog.Errorf("Failed to disable auto repair: %s", string(output))
return fmt.Errorf("failed to disable auto repair: %v", err)
}
return nil
}

func stopKubeletOnNode(node string) error {
glog.Infof("Using gcloud to stop Kubelet on node %s", node)
output, err := execCmd("gcloud", "compute", "ssh", node,
"--command=sudo systemctl stop kubelet-monitor.service && sudo systemctl stop kubelet.service",
"--zone="+framework.TestContext.CloudConfig.Zone).CombinedOutput()
if err != nil {
glog.Errorf("Failed to stop Kubelet: %v", string(output))
return fmt.Errorf("failed to stop Kubelet: %v", err)
}
return nil
}

func recreateNode(nodeName string, groupName string) error {
glog.Infof("Using gcloud to recreate node %s", nodeName)
//gcloud compute instance-groups managed recreate-instances gke-oneoff-default-pool-e4383993-grp --instances=gke-oneoff-default-pool-e4383993-81jm --zone=us-central1-c
output, err := execCmd("gcloud", "compute", "instance-groups", "managed", "recreate-instances", groupName,
"--instances="+nodeName,
"--zone="+framework.TestContext.CloudConfig.Zone).CombinedOutput()
if err != nil {
glog.Errorf("Failed to recreate node: %s", string(output))
return fmt.Errorf("failed to recreate node: %v", err)
}
return nil
}

func isNodeReady(c clientset.Interface, nodeName string) (bool, error) {
glog.Infof("Check if node %s is ready ", nodeName)
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}
result := framework.IsNodeConditionSetAsExpected(node, v1.NodeReady, true)
glog.Infof("Node %s is ready: %t", nodeName, result)
return result, nil
}
2
vendor/k8s.io/kubernetes/test/e2e/lifecycle/reboot.go
generated
vendored
@ -110,7 +110,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
It("each node by switching off the network interface and ensure they function upon switch on", func() {
// switch the network interface off for a while to simulate a network outage
// We sleep 10 seconds to give some time for ssh command to cleanly finish before network is down.
testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && (sudo ifdown eth0 || sudo ip link set eth0 down) && sleep 120 && (sudo ifup eth0 || sudo ip link set eth0 up)' >/dev/null 2>&1 &", nil)
testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && sudo ip link set eth0 down && sleep 120 && sudo ip link set eth0 up && (sudo dhclient || true)' >/dev/null 2>&1 &", nil)
})

It("each node by dropping all inbound packets for a while and ensure they function afterwards", func() {

11
vendor/k8s.io/kubernetes/test/e2e/manifest/BUILD
generated
vendored
@ -3,6 +3,7 @@ package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)

go_library(
@ -10,9 +11,10 @@ go_library(
srcs = ["manifest.go"],
importpath = "k8s.io/kubernetes/test/e2e/manifest",
deps = [
"//cmd/kubeadm/app/util:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//test/e2e/generated:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@ -33,3 +35,10 @@ filegroup(
srcs = [":package-srcs"],
tags = ["automanaged"],
)

go_test(
name = "go_default_test",
srcs = ["manifest_test.go"],
embed = [":go_default_library"],
deps = ["//vendor/k8s.io/api/extensions/v1beta1:go_default_library"],
)

20
vendor/k8s.io/kubernetes/test/e2e/manifest/manifest.go
generated
vendored
@ -17,12 +17,16 @@ limitations under the License.
package manifest

import (
apps "k8s.io/api/apps/v1beta1"
"fmt"
"io/ioutil"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/test/e2e/generated"
)
@ -87,6 +91,20 @@ func IngressFromManifest(fileName string) (*extensions.Ingress, error) {
return &ing, nil
}

// IngressToManifest generates a yaml file at the given path with the given ingress.
// Assumes that the parent directory of the given path exists.
func IngressToManifest(ing *extensions.Ingress, path string) error {
serialized, err := util.MarshalToYaml(ing, extensions.SchemeGroupVersion)
if err != nil {
return fmt.Errorf("failed to marshal ingress %v to YAML: %v", ing, err)
}

if err := ioutil.WriteFile(path, serialized, 0600); err != nil {
return fmt.Errorf("error in writing ingress to file: %s", err)
}
return nil
}

// StatefulSetFromManifest returns a StatefulSet from a manifest stored in fileName in the Namespace indicated by ns.
func StatefulSetFromManifest(fileName, ns string) (*apps.StatefulSet, error) {
var ss apps.StatefulSet

46
vendor/k8s.io/kubernetes/test/e2e/manifest/manifest_test.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package manifest

import (
"io/ioutil"
"os"
"path/filepath"
"testing"

extensions "k8s.io/api/extensions/v1beta1"
)

func TestIngressToManifest(t *testing.T) {
ing := &extensions.Ingress{}
// Create a temp dir.
tmpDir, err := ioutil.TempDir("", "kubemci")
if err != nil {
t.Fatalf("unexpected error in creating temp dir: %s", err)
}
defer os.RemoveAll(tmpDir)
ingPath := filepath.Join(tmpDir, "ing.yaml")

// Write the ingress to a file and ensure that there is no error.
if err := IngressToManifest(ing, ingPath); err != nil {
t.Fatalf("Error in creating file: %s", err)
}
// Writing it again should not return an error.
if err := IngressToManifest(ing, ingPath); err != nil {
t.Fatalf("Error in creating file: %s", err)
}
}
10
vendor/k8s.io/kubernetes/test/e2e/network/BUILD
generated
vendored
@ -16,6 +16,7 @@ go_library(
"firewall.go",
"framework.go",
"ingress.go",
"ingress_scale.go",
"kube_proxy.go",
"network_policy.go",
"network_tiers.go",
@ -35,18 +36,22 @@ go_library(
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/cloudprovider/providers/gce/cloud:go_default_library",
"//pkg/controller/endpoint:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/master/ports:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/e2e/network/scale:go_default_library",
"//test/images/net/nat:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/google.golang.org/api/compute/v0.alpha:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/networking/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
@ -78,6 +83,9 @@ filegroup(

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
srcs = [
":package-srcs",
"//test/e2e/network/scale:all-srcs",
],
tags = ["automanaged"],
)

351
vendor/k8s.io/kubernetes/test/e2e/network/dns.go
generated
vendored
@ -17,255 +17,22 @@ limitations under the License.
package network

import (
"context"
"fmt"
"strings"
"time"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)

const dnsTestPodHostName = "dns-querier-1"
const dnsTestServiceName = "dns-test-service"

func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd string) *v1.Pod {
dnsPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "dns-test-" + string(uuid.NewUUID()),
Namespace: namespace,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "results",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
Containers: []v1.Container{
// TODO: Consider scraping logs instead of running a webserver.
{
Name: "webserver",
Image: imageutils.GetE2EImage(imageutils.TestWebserver),
Ports: []v1.ContainerPort{
{
Name: "http",
ContainerPort: 80,
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "results",
MountPath: "/results",
},
},
},
{
Name: "querier",
Image: imageutils.GetE2EImage(imageutils.Dnsutils),
Command: []string{"sh", "-c", wheezyProbeCmd},
VolumeMounts: []v1.VolumeMount{
{
Name: "results",
MountPath: "/results",
},
},
},
{
Name: "jessie-querier",
Image: imageutils.GetE2EImage(imageutils.JessieDnsutils),
Command: []string{"sh", "-c", jessieProbeCmd},
VolumeMounts: []v1.VolumeMount{
{
Name: "results",
MountPath: "/results",
},
},
},
},
},
}

dnsPod.Spec.Hostname = dnsTestPodHostName
dnsPod.Spec.Subdomain = dnsTestServiceName

return dnsPod
}

func createProbeCommand(namesToResolve []string, hostEntries []string, ptrLookupIP string, fileNamePrefix, namespace string) (string, []string) {
fileNames := make([]string, 0, len(namesToResolve)*2)
probeCmd := "for i in `seq 1 600`; do "
for _, name := range namesToResolve {
// Resolve by TCP and UDP DNS. Use $$(...) because $(...) is
// expanded by kubernetes (though this won't expand so should
// remain a literal, safe > sorry).
lookup := "A"
if strings.HasPrefix(name, "_") {
lookup = "SRV"
}
fileName := fmt.Sprintf("%s_udp@%s", fileNamePrefix, name)
fileNames = append(fileNames, fileName)
probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search %s %s)" && echo OK > /results/%s;`, name, lookup, fileName)
fileName = fmt.Sprintf("%s_tcp@%s", fileNamePrefix, name)
fileNames = append(fileNames, fileName)
probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search %s %s)" && echo OK > /results/%s;`, name, lookup, fileName)
}

for _, name := range hostEntries {
fileName := fmt.Sprintf("%s_hosts@%s", fileNamePrefix, name)
fileNames = append(fileNames, fileName)
probeCmd += fmt.Sprintf(`test -n "$$(getent hosts %s)" && echo OK > /results/%s;`, name, fileName)
}

podARecByUDPFileName := fmt.Sprintf("%s_udp@PodARecord", fileNamePrefix)
podARecByTCPFileName := fmt.Sprintf("%s_tcp@PodARecord", fileNamePrefix)
probeCmd += fmt.Sprintf(`podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".%s.pod.cluster.local"}');`, namespace)
probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search $${podARec} A)" && echo OK > /results/%s;`, podARecByUDPFileName)
probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search $${podARec} A)" && echo OK > /results/%s;`, podARecByTCPFileName)
fileNames = append(fileNames, podARecByUDPFileName)
fileNames = append(fileNames, podARecByTCPFileName)

if len(ptrLookupIP) > 0 {
ptrLookup := fmt.Sprintf("%s.in-addr.arpa.", strings.Join(reverseArray(strings.Split(ptrLookupIP, ".")), "."))
ptrRecByUDPFileName := fmt.Sprintf("%s_udp@PTR", ptrLookupIP)
ptrRecByTCPFileName := fmt.Sprintf("%s_tcp@PTR", ptrLookupIP)
probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search %s PTR)" && echo OK > /results/%s;`, ptrLookup, ptrRecByUDPFileName)
probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search %s PTR)" && echo OK > /results/%s;`, ptrLookup, ptrRecByTCPFileName)
fileNames = append(fileNames, ptrRecByUDPFileName)
fileNames = append(fileNames, ptrRecByTCPFileName)
}

probeCmd += "sleep 1; done"
return probeCmd, fileNames
}

// createTargetedProbeCommand returns a command line that performs a DNS lookup for a specific record type
func createTargetedProbeCommand(nameToResolve string, lookup string, fileNamePrefix string) (string, string) {
fileName := fmt.Sprintf("%s_udp@%s", fileNamePrefix, nameToResolve)
probeCmd := fmt.Sprintf("dig +short +tries=12 +norecurse %s %s > /results/%s", nameToResolve, lookup, fileName)
return probeCmd, fileName
}

func assertFilesExist(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) {
assertFilesContain(fileNames, fileDir, pod, client, false, "")
}

func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface, check bool, expected string) {
var failed []string

framework.ExpectNoError(wait.Poll(time.Second*10, time.Second*600, func() (bool, error) {
failed = []string{}

ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()

for _, fileName := range fileNames {
contents, err := client.CoreV1().RESTClient().Get().
Context(ctx).
Namespace(pod.Namespace).
Resource("pods").
SubResource("proxy").
Name(pod.Name).
Suffix(fileDir, fileName).
Do().Raw()

if err != nil {
if ctx.Err() != nil {
framework.Failf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
} else {
framework.Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
}
failed = append(failed, fileName)
} else if check && strings.TrimSpace(string(contents)) != expected {
framework.Logf("File %s from pod %s contains '%s' instead of '%s'", fileName, pod.Name, string(contents), expected)
failed = append(failed, fileName)
}
}
if len(failed) == 0 {
return true, nil
}
framework.Logf("Lookups using %s failed for: %v\n", pod.Name, failed)
return false, nil
}))
Expect(len(failed)).To(Equal(0))
}

func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) {
By("submitting the pod to kubernetes")
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
defer func() {
By("deleting the pod")
defer GinkgoRecover()
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
}()
if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create %s pod: %v", pod.Name, err)
}

framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

By("retrieving the pod")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get pod %s: %v", pod.Name, err)
}
// Try to find results for each expected name.
By("looking for the results for each expected name from probers")
assertFilesExist(fileNames, "results", pod, f.ClientSet)

// TODO: probe from the host, too.

framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}

func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {
By("submitting the pod to kubernetes")
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
defer func() {
By("deleting the pod")
defer GinkgoRecover()
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
}()
if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create %s pod: %v", pod.Name, err)
}

framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

By("retrieving the pod")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get pod %s: %v", pod.Name, err)
}
// Try to find the expected value for each expected name.
By("looking for the results for each expected name from probers")
assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value)

framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}

func reverseArray(arr []string) []string {
for i := 0; i < len(arr)/2; i++ {
j := len(arr) - i - 1
arr[i], arr[j] = arr[j], arr[i]
}
return arr
}

var _ = SIGDescribe("DNS", func() {
f := framework.NewDefaultFramework("dns")

@ -295,7 +62,7 @@ var _ = SIGDescribe("DNS", func() {

// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS")
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
})

@ -345,7 +112,7 @@ var _ = SIGDescribe("DNS", func() {

// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS")
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
pod.ObjectMeta.Labels = testServiceSelector

validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
@ -378,7 +145,7 @@ var _ = SIGDescribe("DNS", func() {

// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS")
pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
pod1.ObjectMeta.Labels = testServiceSelector
pod1.Spec.Hostname = podHostname
pod1.Spec.Subdomain = serviceName
@ -407,7 +174,7 @@ var _ = SIGDescribe("DNS", func() {

// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS")
pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)

validateTargetedProbeOutput(f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.")

@ -424,7 +191,7 @@ var _ = SIGDescribe("DNS", func() {

// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a second pod to probe DNS")
pod2 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
pod2 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)

validateTargetedProbeOutput(f, pod2, []string{wheezyFileName, jessieFileName}, "bar.example.com.")

@ -444,11 +211,111 @@ var _ = SIGDescribe("DNS", func() {

// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a third pod to probe DNS")
pod3 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
pod3 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)

svc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Get(externalNameService.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())

validateTargetedProbeOutput(f, pod3, []string{wheezyFileName, jessieFileName}, svc.Spec.ClusterIP)
})

It("should support configurable pod resolv.conf", func() {
By("Preparing a test DNS service with injected DNS names...")
testInjectedIP := "1.1.1.1"
testDNSNameShort := "notexistname"
testSearchPath := "resolv.conf.local"
testDNSNameFull := fmt.Sprintf("%s.%s", testDNSNameShort, testSearchPath)

testServerPod := generateDNSServerPod(map[string]string{
testDNSNameFull: testInjectedIP,
})
testServerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testServerPod)
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s", testServerPod.Name)
framework.Logf("Created pod %v", testServerPod)
defer func() {
framework.Logf("Deleting pod %s...", testServerPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("Failed to delete pod %s: %v", testServerPod.Name, err)
}
}()
Expect(f.WaitForPodRunning(testServerPod.Name)).NotTo(HaveOccurred(), "failed to wait for pod %s to be running", testServerPod.Name)

// Retrieve server pod IP.
testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(testServerPod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to get pod %v", testServerPod.Name)
testServerIP := testServerPod.Status.PodIP
framework.Logf("testServerIP is %s", testServerIP)

By("Creating a pod with dnsPolicy=None and customized dnsConfig...")
testUtilsPod := generateDNSUtilsPod()
testUtilsPod.Spec.DNSPolicy = v1.DNSNone
testNdotsValue := "2"
testUtilsPod.Spec.DNSConfig = &v1.PodDNSConfig{
Nameservers: []string{testServerIP},
Searches: []string{testSearchPath},
Options: []v1.PodDNSConfigOption{
{
Name: "ndots",
Value: &testNdotsValue,
},
},
}
testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testUtilsPod)
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s", testUtilsPod.Name)
framework.Logf("Created pod %v", testUtilsPod)
defer func() {
framework.Logf("Deleting pod %s...", testUtilsPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("Failed to delete pod %s: %v", testUtilsPod.Name, err)
}
}()
Expect(f.WaitForPodRunning(testUtilsPod.Name)).NotTo(HaveOccurred(), "failed to wait for pod %s to be running", testUtilsPod.Name)

By("Verifying customized DNS option is configured on pod...")
// TODO: Figure out a better way other than checking the actual resolv.conf file.
cmd := []string{"cat", "/etc/resolv.conf"}
stdout, stderr, err := f.ExecWithOptions(framework.ExecOptions{
Command: cmd,
Namespace: f.Namespace.Name,
PodName: testUtilsPod.Name,
ContainerName: "util",
CaptureStdout: true,
CaptureStderr: true,
})
Expect(err).NotTo(HaveOccurred(), "failed to examine resolv.conf file on pod, stdout: %v, stderr: %v, err: %v", stdout, stderr, err)
if !strings.Contains(stdout, "ndots:2") {
framework.Failf("customized DNS options not found in resolv.conf, got: %s", stdout)
}

By("Verifying customized name server and search path are working...")
// Do dig on not-exist-dns-name and see if the injected DNS record is returned.
// This verifies both:
// - Custom search path is appended.
// - DNS query is sent to the specified server.
cmd = []string{"/usr/bin/dig", "+short", "+search", testDNSNameShort}
digFunc := func() (bool, error) {
stdout, stderr, err := f.ExecWithOptions(framework.ExecOptions{
Command: cmd,
Namespace: f.Namespace.Name,
PodName: testUtilsPod.Name,
ContainerName: "util",
CaptureStdout: true,
CaptureStderr: true,
})
if err != nil {
framework.Logf("Failed to execute dig command, stdout:%v, stderr: %v, err: %v", stdout, stderr, err)
return false, nil
}
res := strings.Split(stdout, "\n")
if len(res) != 1 || res[0] != testInjectedIP {
framework.Logf("Expect command `%v` to return %s, got: %v", cmd, testInjectedIP, res)
return false, nil
}
return true, nil
}
err = wait.PollImmediate(5*time.Second, 3*time.Minute, digFunc)
Expect(err).NotTo(HaveOccurred(), "failed to verify customized name server and search path")

// TODO: Add more test cases for other DNSPolicies.
})
})

268
vendor/k8s.io/kubernetes/test/e2e/network/dns_common.go
generated
vendored
268
vendor/k8s.io/kubernetes/test/e2e/network/dns_common.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package network
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
@ -27,8 +28,10 @@ import (
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
@ -229,20 +232,19 @@ func (t *dnsTestCommon) deleteUtilPod() {
|
||||
}
|
||||
}
|
||||
|
||||
func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) {
|
||||
t.dnsServerPod = &v1.Pod{
|
||||
func generateDNSServerPod(aRecords map[string]string) *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: t.f.Namespace.Name,
|
||||
GenerateName: "e2e-dns-configmap-dns-server-",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dns",
|
||||
Image: "gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.14.5",
|
||||
Image: imageutils.GetE2EImage(imageutils.DNSMasq),
|
||||
Command: []string{
|
||||
"/usr/sbin/dnsmasq",
|
||||
"-u", "root",
|
||||
@ -257,10 +259,15 @@ func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) {
|
||||
}
|
||||
|
||||
for name, ip := range aRecords {
|
||||
t.dnsServerPod.Spec.Containers[0].Command = append(
|
||||
t.dnsServerPod.Spec.Containers[0].Command,
|
||||
pod.Spec.Containers[0].Command = append(
|
||||
pod.Spec.Containers[0].Command,
|
||||
fmt.Sprintf("-A/%v/%v", name, ip))
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) {
|
||||
t.dnsServerPod = generateDNSServerPod(aRecords)
|
||||
|
||||
var err error
|
||||
t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.dnsServerPod)
|
||||
@ -280,3 +287,252 @@ func (t *dnsTestCommon) deleteDNSServerPod() {
|
||||
t.utilPod.Namespace, t.dnsServerPod.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd, podHostName, serviceName string) *v1.Pod {
|
||||
dnsPod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "dns-test-" + string(uuid.NewUUID()),
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "results",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
// TODO: Consider scraping logs instead of running a webserver.
|
||||
{
|
||||
Name: "webserver",
|
||||
Image: imageutils.GetE2EImage(imageutils.TestWebserver),
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
Name: "http",
|
||||
ContainerPort: 80,
|
||||
},
|
||||
},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "results",
|
||||
MountPath: "/results",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "querier",
|
||||
Image: imageutils.GetE2EImage(imageutils.Dnsutils),
|
||||
Command: []string{"sh", "-c", wheezyProbeCmd},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "results",
|
||||
MountPath: "/results",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "jessie-querier",
|
||||
Image: imageutils.GetE2EImage(imageutils.JessieDnsutils),
|
||||
Command: []string{"sh", "-c", jessieProbeCmd},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "results",
|
||||
MountPath: "/results",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
dnsPod.Spec.Hostname = podHostName
|
||||
dnsPod.Spec.Subdomain = serviceName
|
||||
|
||||
return dnsPod
|
||||
}

func createProbeCommand(namesToResolve []string, hostEntries []string, ptrLookupIP string, fileNamePrefix, namespace string) (string, []string) {
	fileNames := make([]string, 0, len(namesToResolve)*2)
	probeCmd := "for i in `seq 1 600`; do "
	for _, name := range namesToResolve {
		// Resolve by both TCP and UDP DNS. Use $$(...) because $(...) is
		// expanded by kubernetes (this particular command would not be
		// expanded, but it should remain a literal; better safe than sorry).
		lookup := "A"
		if strings.HasPrefix(name, "_") {
			lookup = "SRV"
		}
		fileName := fmt.Sprintf("%s_udp@%s", fileNamePrefix, name)
		fileNames = append(fileNames, fileName)
		probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search %s %s)" && echo OK > /results/%s;`, name, lookup, fileName)
		fileName = fmt.Sprintf("%s_tcp@%s", fileNamePrefix, name)
		fileNames = append(fileNames, fileName)
		probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search %s %s)" && echo OK > /results/%s;`, name, lookup, fileName)
	}

	for _, name := range hostEntries {
		fileName := fmt.Sprintf("%s_hosts@%s", fileNamePrefix, name)
		fileNames = append(fileNames, fileName)
		probeCmd += fmt.Sprintf(`test -n "$$(getent hosts %s)" && echo OK > /results/%s;`, name, fileName)
	}

	podARecByUDPFileName := fmt.Sprintf("%s_udp@PodARecord", fileNamePrefix)
	podARecByTCPFileName := fmt.Sprintf("%s_tcp@PodARecord", fileNamePrefix)
	probeCmd += fmt.Sprintf(`podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".%s.pod.cluster.local"}');`, namespace)
	probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search $${podARec} A)" && echo OK > /results/%s;`, podARecByUDPFileName)
	probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search $${podARec} A)" && echo OK > /results/%s;`, podARecByTCPFileName)
	fileNames = append(fileNames, podARecByUDPFileName)
	fileNames = append(fileNames, podARecByTCPFileName)

	if len(ptrLookupIP) > 0 {
		ptrLookup := fmt.Sprintf("%s.in-addr.arpa.", strings.Join(reverseArray(strings.Split(ptrLookupIP, ".")), "."))
		ptrRecByUDPFileName := fmt.Sprintf("%s_udp@PTR", ptrLookupIP)
		ptrRecByTCPFileName := fmt.Sprintf("%s_tcp@PTR", ptrLookupIP)
		probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search %s PTR)" && echo OK > /results/%s;`, ptrLookup, ptrRecByUDPFileName)
		probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search %s PTR)" && echo OK > /results/%s;`, ptrLookup, ptrRecByTCPFileName)
		fileNames = append(fileNames, ptrRecByUDPFileName)
		fileNames = append(fileNames, ptrRecByTCPFileName)
	}

	probeCmd += "sleep 1; done"
	return probeCmd, fileNames
}
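
// For illustration (assumed values): with fileNamePrefix "wheezy" and the
// name "kubernetes.default", the loop body above contains the fragment
//
//   test -n "$$(dig +notcp +noall +answer +search kubernetes.default A)" && echo OK > /results/wheezy_udp@kubernetes.default;
//
// and once the `$$` escape has collapsed to a single `$` in the container's
// command line, the shell in the querier container runs a plain `$(dig ...)`
// command substitution.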

// createTargetedProbeCommand returns a command line that performs a DNS lookup for a specific record type
func createTargetedProbeCommand(nameToResolve string, lookup string, fileNamePrefix string) (string, string) {
	fileName := fmt.Sprintf("%s_udp@%s", fileNamePrefix, nameToResolve)
	probeCmd := fmt.Sprintf("dig +short +tries=12 +norecurse %s %s > /results/%s", nameToResolve, lookup, fileName)
	return probeCmd, fileName
}
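
// Note: `dig +short` prints only the record data (for example an IP address
// or a CNAME target), so the file written above can be compared verbatim
// against an expected value by assertFilesContain with check=true.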

func assertFilesExist(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) {
	assertFilesContain(fileNames, fileDir, pod, client, false, "")
}

func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface, check bool, expected string) {
	var failed []string

	framework.ExpectNoError(wait.Poll(time.Second*10, time.Second*600, func() (bool, error) {
		failed = []string{}

		ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
		defer cancel()

		for _, fileName := range fileNames {
			contents, err := client.CoreV1().RESTClient().Get().
				Context(ctx).
				Namespace(pod.Namespace).
				Resource("pods").
				SubResource("proxy").
				Name(pod.Name).
				Suffix(fileDir, fileName).
				Do().Raw()

			if err != nil {
				if ctx.Err() != nil {
					framework.Failf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
				} else {
					framework.Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
				}
				failed = append(failed, fileName)
			} else if check && strings.TrimSpace(string(contents)) != expected {
				framework.Logf("File %s from pod %s contains '%s' instead of '%s'", fileName, pod.Name, string(contents), expected)
				failed = append(failed, fileName)
			}
		}
		if len(failed) == 0 {
			return true, nil
		}
		framework.Logf("Lookups using %s failed for: %v\n", pod.Name, failed)
		return false, nil
	}))
	Expect(len(failed)).To(Equal(0))
}
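
// Note: the probe files are fetched through the API server's "pods/proxy"
// subresource (GET .../pods/<name>/proxy/<fileDir>/<file>, served by the
// webserver container), so no exec into the pod is needed; missing files are
// retried every 10s for up to 10 minutes by the wait.Poll above.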

func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) {
	By("submitting the pod to kubernetes")
	podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
	defer func() {
		By("deleting the pod")
		defer GinkgoRecover()
		podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
	}()
	if _, err := podClient.Create(pod); err != nil {
		framework.Failf("Failed to create %s pod: %v", pod.Name, err)
	}

	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

	By("retrieving the pod")
	pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
	if err != nil {
		framework.Failf("Failed to get pod %s: %v", pod.Name, err)
	}
	// Try to find results for each expected name.
	By("looking for the results for each expected name from probers")
	assertFilesExist(fileNames, "results", pod, f.ClientSet)

	// TODO: probe from the host, too.

	framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}

func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {
	By("submitting the pod to kubernetes")
	podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
	defer func() {
		By("deleting the pod")
		defer GinkgoRecover()
		podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
	}()
	if _, err := podClient.Create(pod); err != nil {
		framework.Failf("Failed to create %s pod: %v", pod.Name, err)
	}

	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

	By("retrieving the pod")
	pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
	if err != nil {
		framework.Failf("Failed to get pod %s: %v", pod.Name, err)
	}
	// Try to find the expected value for each expected name.
	By("looking for the results for each expected name from probers")
	assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value)

	framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}
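
// reverseArray reverses arr in place and returns the same slice.
// createProbeCommand uses it to build the in-addr.arpa name for PTR lookups,
// e.g. (assumed input) "10.0.2.1" becomes "1.2.0.10.in-addr.arpa.".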
func reverseArray(arr []string) []string {
	for i := 0; i < len(arr)/2; i++ {
		j := len(arr) - i - 1
		arr[i], arr[j] = arr[j], arr[i]
	}
	return arr
}

func generateDNSUtilsPod() *v1.Pod {
	return &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind: "Pod",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "e2e-dns-utils-",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    "util",
					Image:   imageutils.GetE2EImage(imageutils.Dnsutils),
					Command: []string{"sleep", "10000"},
				},
			},
		},
	}
}
2
vendor/k8s.io/kubernetes/test/e2e/network/firewall.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/network/firewall.go
generated
vendored
@ -170,7 +170,7 @@ var _ = SIGDescribe("Firewall rule", func() {
		nodeAddrs := framework.NodeAddresses(nodes, v1.NodeExternalIP)
		Expect(len(nodeAddrs)).NotTo(BeZero())
		masterAddr := framework.GetMasterAddress(cs)
		flag, _ := framework.TestNotReachableHTTPTimeout(masterAddr, ports.ControllerManagerPort, framework.FirewallTestTcpTimeout)
		flag, _ := framework.TestNotReachableHTTPTimeout(masterAddr, ports.InsecureKubeControllerManagerPort, framework.FirewallTestTcpTimeout)
		Expect(flag).To(BeTrue())
		flag, _ = framework.TestNotReachableHTTPTimeout(masterAddr, ports.SchedulerPort, framework.FirewallTestTcpTimeout)
		Expect(flag).To(BeTrue())
305
vendor/k8s.io/kubernetes/test/e2e/network/ingress.go
generated
vendored
305
vendor/k8s.io/kubernetes/test/e2e/network/ingress.go
generated
vendored
@ -18,14 +18,22 @@ package network

import (
	"fmt"
	"net/http"
	"path/filepath"
	"strings"
	"time"

	compute "google.golang.org/api/compute/v1"

	extensions "k8s.io/api/extensions/v1beta1"
	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apiserver/pkg/authentication/serviceaccount"
	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
@ -83,7 +91,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
			Client: jig.Client,
			Cloud:  framework.TestContext.CloudConfig,
		}
		gceController.Init()
		err := gceController.Init()
		Expect(err).NotTo(HaveOccurred())
	})

	// Platform specific cleanup
@ -99,7 +108,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
		jig.TryDeleteIngress()

		By("Cleaning up cloud resources")
		framework.CleanupGCEIngressController(gceController)
		Expect(gceController.CleanupGCEIngressController()).NotTo(HaveOccurred())
	})

	It("should conform to Ingress spec", func() {
@ -118,8 +127,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
		By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", ns, ip))

		jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "static-ip"), ns, map[string]string{
			"kubernetes.io/ingress.global-static-ip-name": ns,
			"kubernetes.io/ingress.allow-http":            "false",
			framework.IngressStaticIPKey:  ns,
			framework.IngressAllowHTTPKey: "false",
		}, map[string]string{})

		By("waiting for Ingress to come up with ip: " + ip)
@ -153,10 +162,244 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
		// framework.ExpectNoError(jig.verifyURL(fmt.Sprintf("https://%v/", ip), "", 30, 1*time.Second, httpClient))
	})

	It("should update ingress while sync failures occur on other ingresses", func() {
		By("Creating ingresses that would fail on sync.")
		ingFailTLSBackend := &extensions.Ingress{
			ObjectMeta: metav1.ObjectMeta{
				Name: "ing-fail-on-tls-backend",
			},
			Spec: extensions.IngressSpec{
				TLS: []extensions.IngressTLS{
					{SecretName: "tls-secret-notexist"},
				},
				Backend: &extensions.IngressBackend{
					ServiceName: "echoheaders-notexist",
					ServicePort: intstr.IntOrString{
						Type:   intstr.Int,
						IntVal: 80,
					},
				},
			},
		}
		_, err := jig.Client.ExtensionsV1beta1().Ingresses(ns).Create(ingFailTLSBackend)
		defer func() {
			if err := jig.Client.ExtensionsV1beta1().Ingresses(ns).Delete(ingFailTLSBackend.Name, nil); err != nil {
				framework.Logf("Failed to delete ingress %s: %v", ingFailTLSBackend.Name, err)
			}
		}()
		Expect(err).NotTo(HaveOccurred())

		ingFailRules := &extensions.Ingress{
			ObjectMeta: metav1.ObjectMeta{
				Name: "ing-fail-on-rules",
			},
			Spec: extensions.IngressSpec{
				Rules: []extensions.IngressRule{
					{
						Host: "foo.bar.com",
						IngressRuleValue: extensions.IngressRuleValue{
							HTTP: &extensions.HTTPIngressRuleValue{
								Paths: []extensions.HTTPIngressPath{
									{
										Path: "/foo",
										Backend: extensions.IngressBackend{
											ServiceName: "echoheaders-notexist",
											ServicePort: intstr.IntOrString{
												Type:   intstr.Int,
												IntVal: 80,
											},
										},
									},
								},
							},
						},
					},
				},
			},
		}
		_, err = jig.Client.ExtensionsV1beta1().Ingresses(ns).Create(ingFailRules)
		defer func() {
			if err := jig.Client.ExtensionsV1beta1().Ingresses(ns).Delete(ingFailRules.Name, nil); err != nil {
				framework.Logf("Failed to delete ingress %s: %v", ingFailRules.Name, err)
			}
		}()
		Expect(err).NotTo(HaveOccurred())

		By("Creating a basic HTTP ingress and wait for it to come up")
		jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http"), ns, nil, nil)
		jig.WaitForIngress(true)

		By("Updating the path on ingress and wait for it to take effect")
		jig.Update(func(ing *extensions.Ingress) {
			updatedRule := extensions.IngressRule{
				Host: "ingress.test.com",
				IngressRuleValue: extensions.IngressRuleValue{
					HTTP: &extensions.HTTPIngressRuleValue{
						Paths: []extensions.HTTPIngressPath{
							{
								Path: "/test",
								// Copy backend from the first rule.
								Backend: ing.Spec.Rules[0].HTTP.Paths[0].Backend,
							},
						},
					},
				},
			}
			// Replace the first rule.
			ing.Spec.Rules[0] = updatedRule
		})
		// Wait for change to take effect on the updated ingress.
		jig.WaitForIngress(false)
	})

	It("should not reconcile manually modified health check for ingress", func() {
		By("Creating a basic HTTP ingress and wait for it to come up.")
		jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http"), ns, nil, nil)
		jig.WaitForIngress(true)

		// Get cluster UID.
		clusterID, err := framework.GetClusterID(f.ClientSet)
		Expect(err).NotTo(HaveOccurred())
		// Get the related nodeports.
		nodePorts := jig.GetIngressNodePorts(false)
		Expect(len(nodePorts)).ToNot(Equal(0))

		// Filter health check using cluster UID as the suffix.
		By("Retrieving relevant health check resources from GCE.")
		gceCloud := gceController.Cloud.Provider.(*gcecloud.GCECloud)
		hcs, err := gceCloud.ListHealthChecks()
		Expect(err).NotTo(HaveOccurred())
		var hcToChange *compute.HealthCheck
		for _, hc := range hcs {
			if strings.HasSuffix(hc.Name, clusterID) {
				Expect(hc.HttpHealthCheck).NotTo(BeNil())
				if fmt.Sprintf("%d", hc.HttpHealthCheck.Port) == nodePorts[0] {
					hcToChange = hc
					break
				}
			}
		}
		Expect(hcToChange).NotTo(BeNil())

		By(fmt.Sprintf("Modifying health check %v without involving ingress.", hcToChange.Name))
		// Change timeout from 60s to 25s.
		hcToChange.TimeoutSec = 25
		// Change path from /healthz to /.
		hcToChange.HttpHealthCheck.RequestPath = "/"
		err = gceCloud.UpdateHealthCheck(hcToChange)
		Expect(err).NotTo(HaveOccurred())

		// Add one more path to ingress to trigger resource syncing.
		By("Adding a new path to ingress and wait for it to take effect.")
		jig.Update(func(ing *extensions.Ingress) {
			ing.Spec.Rules = append(ing.Spec.Rules, extensions.IngressRule{
				Host: "ingress.test.com",
				IngressRuleValue: extensions.IngressRuleValue{
					HTTP: &extensions.HTTPIngressRuleValue{
						Paths: []extensions.HTTPIngressPath{
							{
								Path: "/test",
								// Copy backend from the first rule.
								Backend: ing.Spec.Rules[0].HTTP.Paths[0].Backend,
							},
						},
					},
				},
			})
		})
		// Wait for change to take effect before checking the health check resource.
		jig.WaitForIngress(false)

		// Validate the modified fields on health check are intact.
		By("Checking if the modified health check is unchanged.")
		hcAfterSync, err := gceCloud.GetHealthCheck(hcToChange.Name)
		Expect(err).NotTo(HaveOccurred())
		Expect(hcAfterSync.HttpHealthCheck).ToNot(Equal(nil))
		Expect(hcAfterSync.TimeoutSec).To(Equal(hcToChange.TimeoutSec))
		Expect(hcAfterSync.HttpHealthCheck.RequestPath).To(Equal(hcToChange.HttpHealthCheck.RequestPath))
	})

	It("should create ingress with pre-shared certificate", func() {
		preSharedCertName := "test-pre-shared-cert"
		By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName))
		testHostname := "test.ingress.com"
		cert, key, err := framework.GenerateRSACerts(testHostname, true)
		Expect(err).NotTo(HaveOccurred())
		gceCloud, err := framework.GetGCECloud()
		Expect(err).NotTo(HaveOccurred())
		defer func() {
			// We would not be able to delete the cert until ingress controller
			// cleans up the target proxy that references it.
			By("Deleting ingress before deleting ssl certificate")
			if jig.Ingress != nil {
				jig.TryDeleteIngress()
			}
			By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName))
			err := wait.Poll(framework.LoadBalancerPollInterval, framework.LoadBalancerCleanupTimeout, func() (bool, error) {
				if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !errors.IsNotFound(err) {
					framework.Logf("Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err)
					return false, nil
				}
				return true, nil
			})
			Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to delete ssl certificate %q: %v", preSharedCertName, err))
		}()
		_, err = gceCloud.CreateSslCertificate(&compute.SslCertificate{
			Name:        preSharedCertName,
			Certificate: string(cert),
			PrivateKey:  string(key),
			Description: "pre-shared cert for ingress testing",
		})
		Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create ssl certificate %q: %v", preSharedCertName, err))

		By("Creating an ingress referencing the pre-shared certificate")
		// Create an ingress referencing this cert using pre-shared-cert annotation.
		jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "pre-shared-cert"), ns, map[string]string{
			framework.IngressPreSharedCertKey: preSharedCertName,
			framework.IngressAllowHTTPKey:     "false",
		}, map[string]string{})

		By("Test that ingress works with the pre-shared certificate")
		err = jig.WaitForIngressWithCert(true, []string{testHostname}, cert)
		Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
	})

	It("should create ingress with backside re-encryption", func() {
		By("Creating a set of ingress, service and deployment that have backside re-encryption configured")
		deployCreated, svcCreated, ingCreated, err := framework.CreateReencryptionIngress(f.ClientSet, f.Namespace.Name)
		defer func() {
			By("Cleaning up re-encryption ingress, service and deployment")
			if errs := framework.CleanupReencryptionIngress(f.ClientSet, deployCreated, svcCreated, ingCreated); len(errs) > 0 {
				framework.Failf("Failed to cleanup re-encryption ingress: %v", errs)
			}
		}()
		Expect(err).NotTo(HaveOccurred(), "Failed to create re-encryption ingress")

		By(fmt.Sprintf("Waiting for ingress %s to come up", ingCreated.Name))
		ingIP, err := jig.WaitForIngressAddress(f.ClientSet, f.Namespace.Name, ingCreated.Name, framework.LoadBalancerPollTimeout)
		Expect(err).NotTo(HaveOccurred(), "Failed to wait for ingress IP")

		By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP))
		timeoutClient := &http.Client{Timeout: framework.IngressReqTimeout}
		err = wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) {
			resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "")
			if err != nil {
				framework.Logf("SimpleGET failed: %v", err)
				return false, nil
			}
			if !strings.Contains(resp, "request_scheme=https") {
				return false, fmt.Errorf("request wasn't served by HTTPS, response body: %s", resp)
			}
			framework.Logf("Poll succeeded, request was served by HTTPS")
			return true, nil
		})
		Expect(err).NotTo(HaveOccurred(), "Failed to verify backside re-encryption ingress")
	})

	It("multicluster ingress should get instance group annotation", func() {
		name := "echomap"
		jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http"), ns, map[string]string{
			framework.IngressClass:    framework.MulticlusterIngressClassValue,
			framework.IngressClassKey: framework.MulticlusterIngressClassValue,
		}, map[string]string{})

		By(fmt.Sprintf("waiting for Ingress %s to come up", name))
@ -179,6 +422,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
		// TODO: Implement a multizone e2e that verifies traffic reaches each
		// zone based on pod labels.
	})

	Describe("GCE [Slow] [Feature:NEG]", func() {
		var gceController *framework.GCEIngressController

@ -191,7 +435,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
			Client: jig.Client,
			Cloud:  framework.TestContext.CloudConfig,
		}
		gceController.Init()
		err := gceController.Init()
		Expect(err).NotTo(HaveOccurred())
	})

	// Platform specific cleanup
@ -207,7 +452,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
		jig.TryDeleteIngress()

		By("Cleaning up cloud resources")
		framework.CleanupGCEIngressController(gceController)
		Expect(gceController.CleanupGCEIngressController()).NotTo(HaveOccurred())
	})

	It("should conform to Ingress spec", func() {
@ -272,12 +517,12 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
			_, err = f.ClientSet.ExtensionsV1beta1().Deployments(ns).UpdateScale(name, scale)
			Expect(err).NotTo(HaveOccurred())
		}
		wait.Poll(5*time.Second, NEGUpdateTimeout, func() (bool, error) {
		wait.Poll(10*time.Second, NEGUpdateTimeout, func() (bool, error) {
			res, err := jig.GetDistinctResponseFromIngress()
			if err != nil {
				return false, err
				return false, nil
			}
			return res.Len() == num, err
			return res.Len() == num, nil
		})
	}

@ -319,12 +564,12 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
		scale.Spec.Replicas = int32(replicas)
		_, err = f.ClientSet.ExtensionsV1beta1().Deployments(ns).UpdateScale(name, scale)
		Expect(err).NotTo(HaveOccurred())
		wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
		wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
			res, err := jig.GetDistinctResponseFromIngress()
			if err != nil {
				return false, err
				return false, nil
			}
			return res.Len() == replicas, err
			return res.Len() == replicas, nil
		})

		By("Trigger rolling update and observe service disruption")
@ -335,7 +580,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
		deploy.Spec.Template.Spec.TerminationGracePeriodSeconds = &gracePeriod
		_, err = f.ClientSet.ExtensionsV1beta1().Deployments(ns).Update(deploy)
		Expect(err).NotTo(HaveOccurred())
		wait.Poll(30*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
		wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
			res, err := jig.GetDistinctResponseFromIngress()
			Expect(err).NotTo(HaveOccurred())
			deploy, err := f.ClientSet.ExtensionsV1beta1().Deployments(ns).Get(name, metav1.GetOptions{})
@ -356,6 +601,38 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
		})
	})

	Describe("GCE [Slow] [Feature:kubemci]", func() {
		// Platform specific setup
		BeforeEach(func() {
			framework.SkipUnlessProviderIs("gce", "gke")
			jig.Class = framework.MulticlusterIngressClassValue
		})

		// Platform specific cleanup
		AfterEach(func() {
			if CurrentGinkgoTestDescription().Failed {
				framework.DescribeIng(ns)
			}
			if jig.Ingress == nil {
				By("No ingress created, no cleanup necessary")
				return
			}
			By("Deleting ingress")
			jig.TryDeleteIngress()
		})

		It("should conform to Ingress spec", func() {
			jig.PollInterval = 5 * time.Second
			conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{})
			for _, t := range conformanceTests {
				By(t.EntryLog)
				t.Execute()
				By(t.ExitLog)
				jig.WaitForIngress(true /*waitForNodePort*/)
			}
		})
	})

	// Time: borderline 5m, slow by design
	Describe("[Slow] Nginx", func() {
		var nginxController *framework.NginxIngressController
64
vendor/k8s.io/kubernetes/test/e2e/network/ingress_scale.go
generated
vendored
Normal file
64
vendor/k8s.io/kubernetes/test/e2e/network/ingress_scale.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package network

import (
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/network/scale"

	. "github.com/onsi/ginkgo"
)

var _ = SIGDescribe("Loadbalancing: L7 Scalability", func() {
	defer GinkgoRecover()
	var (
		ns string
	)
	f := framework.NewDefaultFramework("ingress-scale")

	BeforeEach(func() {
		ns = f.Namespace.Name
	})

	Describe("GCE [Slow] [Serial] [Feature:IngressScale]", func() {
		var (
			scaleFramework *scale.IngressScaleFramework
		)

		BeforeEach(func() {
			framework.SkipUnlessProviderIs("gce", "gke")

			scaleFramework = scale.NewIngressScaleFramework(f.ClientSet, ns, framework.TestContext.CloudConfig)
			if err := scaleFramework.PrepareScaleTest(); err != nil {
				framework.Failf("Unexpected error while preparing ingress scale test: %v", err)
			}
		})

		AfterEach(func() {
			if errs := scaleFramework.CleanupScaleTest(); len(errs) != 0 {
				framework.Failf("Unexpected error while cleaning up ingress scale test: %v", errs)
			}
		})

		It("Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses", func() {
			if errs := scaleFramework.RunScaleTest(); len(errs) != 0 {
				framework.Failf("Unexpected error while running ingress scale test: %v", errs)
			}
		})
	})
})
10
vendor/k8s.io/kubernetes/test/e2e/network/kube_proxy.go
generated
vendored
10
vendor/k8s.io/kubernetes/test/e2e/network/kube_proxy.go
generated
vendored
@ -171,19 +171,19 @@ var _ = SIGDescribe("Network", func() {
		// If test flakes occur here, then this check should be performed
		// in a loop as there may be a race with the client connecting.
		framework.IssueSSHCommandWithResult(
			fmt.Sprintf("sudo cat /proc/net/ip_conntrack | grep 'dport=%v'",
			fmt.Sprintf("sudo cat /proc/net/nf_conntrack | grep 'dport=%v'",
				testDaemonTcpPort),
			framework.TestContext.Provider,
			clientNodeInfo.node)

		// Timeout in seconds is available as the fifth column from
		// /proc/net/nf_conntrack.
		result, err := framework.IssueSSHCommandWithResult(
			fmt.Sprintf(
				"sudo cat /proc/net/ip_conntrack "+
				"sudo cat /proc/net/nf_conntrack "+
					"| grep 'CLOSE_WAIT.*dst=%v.*dport=%v' "+
					"| tail -n 1"+
					"| awk '{print $3}' ",
					"| awk '{print $5}' ",
				serverNodeInfo.nodeIp,
				testDaemonTcpPort),
			framework.TestContext.Provider,
29
vendor/k8s.io/kubernetes/test/e2e/network/network_tiers.go
generated
vendored
29
vendor/k8s.io/kubernetes/test/e2e/network/network_tiers.go
generated
vendored
@ -26,9 +26,9 @@ import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/cloudprovider"
	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
@ -39,14 +39,12 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
	f := framework.NewDefaultFramework("services")

	var cs clientset.Interface
	var internalClientset internalclientset.Interface
	serviceLBNames := []string{}

	BeforeEach(func() {
		// This test suite requires the GCE environment.
		framework.SkipUnlessProviderIs("gce")
		cs = f.ClientSet
		internalClientset = f.InternalClientset
	})

	AfterEach(func() {
@ -75,12 +73,12 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
		By("creating a Service of type LoadBalancer using the standard network tier")
		svc := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
			setNetworkTier(svc, gcecloud.NetworkTierAnnotationStandard)
			setNetworkTier(svc, gcecloud.NetworkTierAnnotationStandard.ToGCEValue())
		})
		// Verify that service has been updated properly.
		svcTier, err := gcecloud.GetServiceNetworkTier(svc)
		Expect(err).NotTo(HaveOccurred())
		Expect(svcTier).To(Equal(gcecloud.NetworkTierStandard))
		Expect(svcTier).To(Equal(cloud.NetworkTierStandard))
		// Record the LB name for test cleanup.
		serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc))

@ -95,7 +93,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
		// Verify that service has been updated properly.
		svcTier, err = gcecloud.GetServiceNetworkTier(svc)
		Expect(err).NotTo(HaveOccurred())
		Expect(svcTier).To(Equal(gcecloud.NetworkTierDefault))
		Expect(svcTier).To(Equal(cloud.NetworkTierDefault))

		// Wait until the ingress IP changes. Each tier has its own pool of
		// IPs, so changing tiers implies changing IPs.
@ -104,12 +102,14 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
		// Test 3: create a standard-tiered LB with a user-requested IP.
		By("reserving a static IP for the load balancer")
		requestedAddrName := fmt.Sprintf("e2e-ext-lb-net-tier-%s", framework.RunId)
		requestedIP, err := reserveAlphaRegionalAddress(requestedAddrName, gcecloud.NetworkTierStandard)
		gceCloud, err := framework.GetGCECloud()
		Expect(err).NotTo(HaveOccurred())
		requestedIP, err := reserveAlphaRegionalAddress(gceCloud, requestedAddrName, cloud.NetworkTierStandard)
		Expect(err).NotTo(HaveOccurred(), "failed to reserve a STANDARD tiered address")
		defer func() {
			if requestedAddrName != "" {
				// Release GCE static address - this is not kube-managed and will not be automatically released.
				if err := framework.DeleteGCEStaticIP(requestedAddrName); err != nil {
				if err := gceCloud.DeleteRegionAddress(requestedAddrName, gceCloud.Region()); err != nil {
					framework.Logf("failed to release static IP address %q: %v", requestedAddrName, err)
				}
			}
@ -120,13 +120,13 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
		By("updating the Service to use the standard tier with a requested IP")
		svc = jig.UpdateServiceOrFail(ns, svc.Name, func(svc *v1.Service) {
			svc.Spec.LoadBalancerIP = requestedIP
			setNetworkTier(svc, gcecloud.NetworkTierAnnotationStandard)
			setNetworkTier(svc, gcecloud.NetworkTierAnnotationStandard.ToGCEValue())
		})
		// Verify that service has been updated properly.
		Expect(svc.Spec.LoadBalancerIP).To(Equal(requestedIP))
		svcTier, err = gcecloud.GetServiceNetworkTier(svc)
		Expect(err).NotTo(HaveOccurred())
		Expect(svcTier).To(Equal(gcecloud.NetworkTierStandard))
		Expect(svcTier).To(Equal(cloud.NetworkTierStandard))

		// Wait until the ingress IP changes and verifies the LB.
		ingressIP = waitAndVerifyLBWithTier(jig, ns, svcName, ingressIP, createTimeout, lagTimeout)
@ -169,7 +169,7 @@ func waitAndVerifyLBWithTier(jig *framework.ServiceTestJig, ns, svcName, existin
	return ingressIP
}

func getLBNetworkTierByIP(ip string) (gcecloud.NetworkTier, error) {
func getLBNetworkTierByIP(ip string) (cloud.NetworkTier, error) {
	var rule *computealpha.ForwardingRule
	// Retry a few times to tolerate flakes.
	err := wait.PollImmediate(5*time.Second, 15*time.Second, func() (bool, error) {
@ -183,7 +183,7 @@ func getLBNetworkTierByIP(ip string) (gcecloud.NetworkTier, error) {
	if err != nil {
		return "", err
	}
	return gcecloud.NetworkTierGCEValueToType(rule.NetworkTier), nil
	return cloud.NetworkTierGCEValueToType(rule.NetworkTier), nil
}

func getGCEForwardingRuleByIP(ip string) (*computealpha.ForwardingRule, error) {
@ -195,7 +195,7 @@ func getGCEForwardingRuleByIP(ip string) (*computealpha.ForwardingRule, error) {
	if err != nil {
		return nil, err
	}
	for _, rule := range ruleList.Items {
	for _, rule := range ruleList {
		if rule.IPAddress == ip {
			return rule, nil
		}
@ -221,8 +221,7 @@ func clearNetworkTier(svc *v1.Service) {

// TODO: add retries if this turns out to be flaky.
// TODO(#51665): remove this helper function once Network Tiers becomes beta.
func reserveAlphaRegionalAddress(name string, netTier gcecloud.NetworkTier) (string, error) {
	cloud, err := framework.GetGCECloud()
func reserveAlphaRegionalAddress(cloud *gcecloud.GCECloud, name string, netTier cloud.NetworkTier) (string, error) {
	alphaAddr := &computealpha.Address{
		Name:        name,
		NetworkTier: netTier.ToGCEValue(),
4
vendor/k8s.io/kubernetes/test/e2e/network/networking_perf.go
generated
vendored
4
vendor/k8s.io/kubernetes/test/e2e/network/networking_perf.go
generated
vendored
@ -149,9 +149,9 @@ func networkingIPerfTest(isIPv6 bool) {
		}
		})
	}
	fmt.Println("[begin] Node,Bandwith CSV")
	fmt.Println("[begin] Node,Bandwidth CSV")
	fmt.Println(iperfResults.ToTSV())
	fmt.Println("[end] Node,Bandwith CSV")
	fmt.Println("[end] Node,Bandwidth CSV")

	for ipClient, bandwidth := range iperfResults.BandwidthMap {
		framework.Logf("%v had bandwidth %v. Ratio to expected (%v) was %f", ipClient, bandwidth, expectedBandwidth, float64(bandwidth)/float64(expectedBandwidth))
45
vendor/k8s.io/kubernetes/test/e2e/network/proxy.go
generated
vendored
45
vendor/k8s.io/kubernetes/test/e2e/network/proxy.go
generated
vendored
@ -61,22 +61,6 @@ var _ = SIGDescribe("Proxy", func() {
		f := framework.NewFramework("proxy", options, nil)
		prefix := "/api/" + version

		// Port here has to be kept in sync with default kubelet port.
		/*
			Testname: proxy-prefix-node-logs-port
			Description: Ensure that proxy on node logs works with generic top
			level prefix proxy and explicit kubelet port.
		*/
		framework.ConformanceIt("should proxy logs on node with explicit kubelet port ", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", ":10250/logs/") })

		/*
			Testname: proxy-prefix-node-logs
			Description: Ensure that proxy on node logs works with generic top
			level prefix proxy.
		*/
		framework.ConformanceIt("should proxy logs on node ", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", "/logs/") })
		It("should proxy to cadvisor", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", ":4194/containers/") })

		/*
			Testname: proxy-subresource-node-logs-port
			Description: Ensure that proxy on node logs works with node proxy
@ -185,36 +169,15 @@ var _ = SIGDescribe("Proxy", func() {

		// table constructors
		// Try proxying through the service and directly through the pod.
		svcProxyURL := func(scheme, port string) string {
			return prefix + "/proxy/namespaces/" + f.Namespace.Name + "/services/" + net.JoinSchemeNamePort(scheme, service.Name, port)
		}
		subresourceServiceProxyURL := func(scheme, port string) string {
			return prefix + "/namespaces/" + f.Namespace.Name + "/services/" + net.JoinSchemeNamePort(scheme, service.Name, port) + "/proxy"
		}
		podProxyURL := func(scheme, port string) string {
			return prefix + "/proxy/namespaces/" + f.Namespace.Name + "/pods/" + net.JoinSchemeNamePort(scheme, pods[0].Name, port)
		}
		subresourcePodProxyURL := func(scheme, port string) string {
			return prefix + "/namespaces/" + f.Namespace.Name + "/pods/" + net.JoinSchemeNamePort(scheme, pods[0].Name, port) + "/proxy"
		}

		// construct the table
		expectations := map[string]string{
			svcProxyURL("", "portname1") + "/": "foo",
			svcProxyURL("", "80") + "/":        "foo",
			svcProxyURL("", "portname2") + "/": "bar",
			svcProxyURL("", "81") + "/":        "bar",

			svcProxyURL("http", "portname1") + "/": "foo",
			svcProxyURL("http", "80") + "/":        "foo",
			svcProxyURL("http", "portname2") + "/": "bar",
			svcProxyURL("http", "81") + "/":        "bar",

			svcProxyURL("https", "tlsportname1") + "/": "tls baz",
			svcProxyURL("https", "443") + "/":          "tls baz",
			svcProxyURL("https", "tlsportname2") + "/": "tls qux",
			svcProxyURL("https", "444") + "/":          "tls qux",

			subresourceServiceProxyURL("", "portname1") + "/":     "foo",
			subresourceServiceProxyURL("http", "portname1") + "/": "foo",
			subresourceServiceProxyURL("", "portname2") + "/":     "bar",
@ -222,14 +185,6 @@ var _ = SIGDescribe("Proxy", func() {
			subresourceServiceProxyURL("https", "tlsportname1") + "/": "tls baz",
			subresourceServiceProxyURL("https", "tlsportname2") + "/": "tls qux",

			podProxyURL("", "1080") + "/": `<a href="` + podProxyURL("", "1080") + `/rewriteme">test</a>`,
			podProxyURL("", "160") + "/":  "foo",
			podProxyURL("", "162") + "/":  "bar",

			podProxyURL("http", "1080") + "/": `<a href="` + podProxyURL("http", "1080") + `/rewriteme">test</a>`,
			podProxyURL("http", "160") + "/":  "foo",
			podProxyURL("http", "162") + "/":  "bar",

			subresourcePodProxyURL("", "") + "/":         `<a href="` + subresourcePodProxyURL("", "") + `/rewriteme">test</a>`,
			subresourcePodProxyURL("", "1080") + "/":     `<a href="` + subresourcePodProxyURL("", "1080") + `/rewriteme">test</a>`,
			subresourcePodProxyURL("http", "1080") + "/": `<a href="` + subresourcePodProxyURL("http", "1080") + `/rewriteme">test</a>`,
@ -2,26 +2,15 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "framework.go",
        "ubernetes_lite.go",
    ],
    importpath = "k8s.io/kubernetes/test/e2e/multicluster",
    srcs = ["ingress.go"],
    importpath = "k8s.io/kubernetes/test/e2e/network/scale",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/kubelet/apis:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/utils:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
        "//vendor/google.golang.org/api/compute/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ],
)
@ -35,7 +24,10 @@ filegroup(

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    srcs = [
        ":package-srcs",
        "//test/e2e/network/scale/localrun:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
463
vendor/k8s.io/kubernetes/test/e2e/network/scale/ingress.go
generated
vendored
Normal file
463
vendor/k8s.io/kubernetes/test/e2e/network/scale/ingress.go
generated
vendored
Normal file
@ -0,0 +1,463 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scale

import (
	"fmt"
	"io/ioutil"
	"sync"
	"time"

	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
)

const (
	numIngressesSmall      = 5
	numIngressesMedium     = 20
	numIngressesLarge      = 50
	numIngressesExtraLarge = 99

	scaleTestIngressNamePrefix = "ing-scale"
	scaleTestBackendName       = "echoheaders-scale"
	scaleTestSecretName        = "tls-secret-scale"
	scaleTestHostname          = "scale.ingress.com"
	scaleTestNumBackends       = 10
	scaleTestPollInterval      = 15 * time.Second

	// We don't expect waitForIngress to take longer
	// than waitForIngressMaxTimeout.
	waitForIngressMaxTimeout = 80 * time.Minute
	ingressesCleanupTimeout  = 80 * time.Minute
)

var (
	scaleTestLabels = map[string]string{
		"app": scaleTestBackendName,
	}
)

// IngressScaleFramework defines the framework for ingress scale testing.
type IngressScaleFramework struct {
	Clientset     clientset.Interface
	Jig           *framework.IngressTestJig
	GCEController *framework.GCEIngressController
	CloudConfig   framework.CloudConfig
	Logger        framework.TestLogger

	Namespace        string
	EnableTLS        bool
	NumIngressesTest []int
	OutputFile       string

	ScaleTestDeploy *extensions.Deployment
	ScaleTestSvcs   []*v1.Service
	ScaleTestIngs   []*extensions.Ingress

	// BatchCreateLatencies stores all ingress creation latencies, in different
	// batches.
	BatchCreateLatencies [][]time.Duration
	// BatchDurations stores the total duration for each ingress batch creation.
	BatchDurations []time.Duration
	// StepCreateLatencies stores the single ingress creation latency, which happens
	// after each ingress batch creation is complete.
	StepCreateLatencies []time.Duration
	// StepUpdateLatencies stores the single ingress update latency, which happens
	// after each ingress batch creation is complete.
	StepUpdateLatencies []time.Duration
}
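
// For illustration (assumed sizes): with NumIngressesTest = []int{5}, RunScaleTest
// creates 5 ingresses in parallel and records their individual readiness latencies
// in BatchCreateLatencies[0] and the batch wall time in BatchDurations[0]; it then
// creates one more ingress (StepCreateLatencies[0]) and updates it
// (StepUpdateLatencies[0]) while the 5 existing ingresses are in place.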

// NewIngressScaleFramework returns a new framework for ingress scale testing.
func NewIngressScaleFramework(cs clientset.Interface, ns string, cloudConfig framework.CloudConfig) *IngressScaleFramework {
	return &IngressScaleFramework{
		Namespace:   ns,
		Clientset:   cs,
		CloudConfig: cloudConfig,
		Logger:      &framework.E2ELogger{},
		EnableTLS:   true,
		NumIngressesTest: []int{
			numIngressesSmall,
			numIngressesMedium,
			numIngressesLarge,
			numIngressesExtraLarge,
		},
	}
}

// PrepareScaleTest prepares framework for ingress scale testing.
func (f *IngressScaleFramework) PrepareScaleTest() error {
	f.Logger.Infof("Initializing ingress test suite and gce controller...")
	f.Jig = framework.NewIngressTestJig(f.Clientset)
	f.Jig.Logger = f.Logger
	f.Jig.PollInterval = scaleTestPollInterval
	f.GCEController = &framework.GCEIngressController{
		Client: f.Clientset,
		Cloud:  f.CloudConfig,
	}
	if err := f.GCEController.Init(); err != nil {
		return fmt.Errorf("Failed to initialize GCE controller: %v", err)
	}

	f.ScaleTestSvcs = []*v1.Service{}
	f.ScaleTestIngs = []*extensions.Ingress{}

	return nil
}

// CleanupScaleTest cleans up framework for ingress scale testing.
func (f *IngressScaleFramework) CleanupScaleTest() []error {
	var errs []error

	f.Logger.Infof("Cleaning up ingresses...")
	for _, ing := range f.ScaleTestIngs {
		if ing != nil {
			if err := f.Clientset.ExtensionsV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil); err != nil {
				errs = append(errs, fmt.Errorf("Error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err))
			}
		}
	}
	f.Logger.Infof("Cleaning up services...")
	for _, svc := range f.ScaleTestSvcs {
		if svc != nil {
			if err := f.Clientset.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil); err != nil {
				errs = append(errs, fmt.Errorf("Error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err))
			}
		}
	}
	if f.ScaleTestDeploy != nil {
		f.Logger.Infof("Cleaning up deployment %s...", f.ScaleTestDeploy.Name)
		if err := f.Clientset.ExtensionsV1beta1().Deployments(f.ScaleTestDeploy.Namespace).Delete(f.ScaleTestDeploy.Name, nil); err != nil {
			errs = append(errs, fmt.Errorf("Error while deleting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err))
		}
	}

	f.Logger.Infof("Cleaning up cloud resources...")
	if err := f.GCEController.CleanupGCEIngressControllerWithTimeout(ingressesCleanupTimeout); err != nil {
		errs = append(errs, err)
	}

	return errs
}

// RunScaleTest runs ingress scale testing.
func (f *IngressScaleFramework) RunScaleTest() []error {
	var errs []error

	testDeploy := generateScaleTestBackendDeploymentSpec(scaleTestNumBackends)
	f.Logger.Infof("Creating deployment %s...", testDeploy.Name)
	testDeploy, err := f.Jig.Client.ExtensionsV1beta1().Deployments(f.Namespace).Create(testDeploy)
	if err != nil {
		errs = append(errs, fmt.Errorf("Failed to create deployment %s: %v", testDeploy.Name, err))
		return errs
	}
	f.ScaleTestDeploy = testDeploy

	if f.EnableTLS {
		f.Logger.Infof("Ensuring TLS secret %s...", scaleTestSecretName)
		if err := f.Jig.PrepareTLSSecret(f.Namespace, scaleTestSecretName, scaleTestHostname); err != nil {
			errs = append(errs, fmt.Errorf("Failed to prepare TLS secret %s: %v", scaleTestSecretName, err))
			return errs
		}
	}

	// currentNum keeps track of how many ingresses have been created.
	currentNum := new(int)

	prepareIngsFunc := func(goalNum int) {
		var ingWg sync.WaitGroup
		numToCreate := goalNum - *currentNum
		ingWg.Add(numToCreate)
		errQueue := make(chan error, numToCreate)
		latencyQueue := make(chan time.Duration, numToCreate)
		start := time.Now()
		for ; *currentNum < goalNum; *currentNum++ {
			suffix := fmt.Sprintf("%d", *currentNum)
			go func() {
				defer ingWg.Done()

				start := time.Now()
				svcCreated, ingCreated, err := f.createScaleTestServiceIngress(suffix, f.EnableTLS)
				f.ScaleTestSvcs = append(f.ScaleTestSvcs, svcCreated)
				f.ScaleTestIngs = append(f.ScaleTestIngs, ingCreated)
				if err != nil {
					errQueue <- err
					return
				}
				f.Logger.Infof("Waiting for ingress %s to come up...", ingCreated.Name)
				if err := f.Jig.WaitForGivenIngressWithTimeout(ingCreated, false, waitForIngressMaxTimeout); err != nil {
					errQueue <- err
					return
				}
				elapsed := time.Since(start)
				f.Logger.Infof("Spent %s for ingress %s to come up", elapsed, ingCreated.Name)
				latencyQueue <- elapsed
			}()
		}

		// Wait until all ingress creations are complete.
		f.Logger.Infof("Waiting for %d ingresses to come up...", numToCreate)
		ingWg.Wait()
		close(errQueue)
		close(latencyQueue)
		elapsed := time.Since(start)
		var createLatencies []time.Duration
		for latency := range latencyQueue {
			createLatencies = append(createLatencies, latency)
		}
		f.BatchCreateLatencies = append(f.BatchCreateLatencies, createLatencies)
		if len(errQueue) != 0 {
			f.Logger.Errorf("Failed while creating services and ingresses, spent %v", elapsed)
			for err := range errQueue {
				errs = append(errs, err)
			}
			return
		}
		f.Logger.Infof("Spent %s for %d ingresses to come up", elapsed, numToCreate)
		f.BatchDurations = append(f.BatchDurations, elapsed)
	}
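
	// Note on prepareIngsFunc: errQueue and latencyQueue are buffered to
	// numToCreate, so the worker goroutines never block on send; both channels
	// are closed and drained only after ingWg.Wait() has returned.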

	measureCreateUpdateFunc := func() {
		f.Logger.Infof("Create one more ingress and wait for it to come up")
		start := time.Now()
		svcCreated, ingCreated, err := f.createScaleTestServiceIngress(fmt.Sprintf("%d", *currentNum), f.EnableTLS)
		*currentNum = *currentNum + 1
		f.ScaleTestSvcs = append(f.ScaleTestSvcs, svcCreated)
		f.ScaleTestIngs = append(f.ScaleTestIngs, ingCreated)
		if err != nil {
			errs = append(errs, err)
			return
		}

		f.Logger.Infof("Waiting for ingress %s to come up...", ingCreated.Name)
		if err := f.Jig.WaitForGivenIngressWithTimeout(ingCreated, false, waitForIngressMaxTimeout); err != nil {
			errs = append(errs, err)
			return
		}
		elapsed := time.Since(start)
		f.Logger.Infof("Spent %s for ingress %s to come up", elapsed, ingCreated.Name)
		f.StepCreateLatencies = append(f.StepCreateLatencies, elapsed)

		f.Logger.Infof("Updating ingress and wait for change to take effect")
		ingToUpdate, err := f.Clientset.ExtensionsV1beta1().Ingresses(f.Namespace).Get(ingCreated.Name, metav1.GetOptions{})
		if err != nil {
			errs = append(errs, err)
			return
		}
		addTestPathToIngress(ingToUpdate)
		start = time.Now()
		ingToUpdate, err = f.Clientset.ExtensionsV1beta1().Ingresses(f.Namespace).Update(ingToUpdate)
		if err != nil {
			errs = append(errs, err)
			return
		}

		if err := f.Jig.WaitForGivenIngressWithTimeout(ingToUpdate, false, waitForIngressMaxTimeout); err != nil {
			errs = append(errs, err)
			return
		}
		elapsed = time.Since(start)
		f.Logger.Infof("Spent %s for updating ingress %s", elapsed, ingToUpdate.Name)
		f.StepUpdateLatencies = append(f.StepUpdateLatencies, elapsed)
	}

	defer f.dumpLatencies()

	for _, num := range f.NumIngressesTest {
		f.Logger.Infof("Create more ingresses until we reach %d ingresses", num)
		prepareIngsFunc(num)
		f.Logger.Infof("Measure create and update latency with %d ingresses", num)
		measureCreateUpdateFunc()

		if len(errs) != 0 {
			return errs
		}
	}

	return errs
}

func (f *IngressScaleFramework) dumpLatencies() {
	f.Logger.Infof("Dumping scale test latencies...")
	formattedData := f.GetFormattedLatencies()
	if f.OutputFile != "" {
		f.Logger.Infof("Dumping scale test latencies to file %s...", f.OutputFile)
		ioutil.WriteFile(f.OutputFile, []byte(formattedData), 0644)
		return
	}
	f.Logger.Infof("\n%v", formattedData)
}

// GetFormattedLatencies returns the formatted latencies output.
// TODO: Need a better way/format for data output.
func (f *IngressScaleFramework) GetFormattedLatencies() string {
	if len(f.NumIngressesTest) == 0 ||
		len(f.NumIngressesTest) != len(f.BatchCreateLatencies) ||
		len(f.NumIngressesTest) != len(f.BatchDurations) ||
		len(f.NumIngressesTest) != len(f.StepCreateLatencies) ||
		len(f.NumIngressesTest) != len(f.StepUpdateLatencies) {
		return "Failed to construct latencies output."
	}

	res := "--- Procedure logs ---\n"
	for i, latencies := range f.BatchCreateLatencies {
		res += fmt.Sprintf("Create %d ingresses in parallel, each of them takes the amount of time below before it starts serving traffic:\n", len(latencies))
		for _, latency := range latencies {
			res = res + fmt.Sprintf("- %v\n", latency)
		}
		res += fmt.Sprintf("Total duration for completing %d ingress creations: %v\n", len(latencies), f.BatchDurations[i])
		res += fmt.Sprintf("Duration to create one more ingress with %d ingresses existing: %v\n", f.NumIngressesTest[i], f.StepCreateLatencies[i])
		res += fmt.Sprintf("Duration to update one ingress with %d ingresses existing: %v\n", f.NumIngressesTest[i]+1, f.StepUpdateLatencies[i])
	}
	res = res + "--- Summary ---\n"
	var batchTotalStr, batchAvgStr, singleCreateStr, singleUpdateStr string
	for i, latencies := range f.BatchCreateLatencies {
		batchTotalStr += fmt.Sprintf("Batch creation total latency for %d ingresses with %d ingresses existing: %v\n", len(latencies), f.NumIngressesTest[i]-len(latencies), f.BatchDurations[i])
		var avgLatency time.Duration
		for _, latency := range latencies {
			avgLatency = avgLatency + latency
		}
		avgLatency /= time.Duration(len(latencies))
		batchAvgStr += fmt.Sprintf("Batch creation average latency for %d ingresses with %d ingresses existing: %v\n", len(latencies), f.NumIngressesTest[i]-len(latencies), avgLatency)
		singleCreateStr += fmt.Sprintf("Single ingress creation latency with %d ingresses existing: %v\n", f.NumIngressesTest[i], f.StepCreateLatencies[i])
		singleUpdateStr += fmt.Sprintf("Single ingress update latency with %d ingresses existing: %v\n", f.NumIngressesTest[i]+1, f.StepUpdateLatencies[i])
	}
	res += batchTotalStr + batchAvgStr + singleCreateStr + singleUpdateStr
	return res
}

func addTestPathToIngress(ing *extensions.Ingress) {
	ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths = append(
		ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths,
		extensions.HTTPIngressPath{
			Path:    "/test",
			Backend: ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths[0].Backend,
		})
}

func (f *IngressScaleFramework) createScaleTestServiceIngress(suffix string, enableTLS bool) (*v1.Service, *extensions.Ingress, error) {
	svcCreated, err := f.Clientset.CoreV1().Services(f.Namespace).Create(generateScaleTestServiceSpec(suffix))
	if err != nil {
		return nil, nil, err
	}
	ingCreated, err := f.Clientset.ExtensionsV1beta1().Ingresses(f.Namespace).Create(generateScaleTestIngressSpec(suffix, enableTLS))
	if err != nil {
		return nil, nil, err
	}
	return svcCreated, ingCreated, nil
}

func generateScaleTestIngressSpec(suffix string, enableTLS bool) *extensions.Ingress {
	ing := &extensions.Ingress{
		ObjectMeta: metav1.ObjectMeta{
			Name: fmt.Sprintf("%s-%s", scaleTestIngressNamePrefix, suffix),
		},
		Spec: extensions.IngressSpec{
			TLS: []extensions.IngressTLS{
				{SecretName: scaleTestSecretName},
			},
			Rules: []extensions.IngressRule{
				{
					Host: scaleTestHostname,
					IngressRuleValue: extensions.IngressRuleValue{
						HTTP: &extensions.HTTPIngressRuleValue{
							Paths: []extensions.HTTPIngressPath{
								{
									Path: "/scale",
									Backend: extensions.IngressBackend{
										ServiceName: fmt.Sprintf("%s-%s", scaleTestBackendName, suffix),
										ServicePort: intstr.IntOrString{
											Type:   intstr.Int,
											IntVal: 80,
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}
	if enableTLS {
		ing.Spec.TLS = []extensions.IngressTLS{
			{SecretName: scaleTestSecretName},
		}
	}
	return ing
}

func generateScaleTestServiceSpec(suffix string) *v1.Service {
	return &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:   fmt.Sprintf("%s-%s", scaleTestBackendName, suffix),
			Labels: scaleTestLabels,
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{{
				Name:       "http",
				Protocol:   v1.ProtocolTCP,
				Port:       80,
				TargetPort: intstr.FromInt(8080),
			}},
			Selector: scaleTestLabels,
			Type:     v1.ServiceTypeNodePort,
		},
	}
}

func generateScaleTestBackendDeploymentSpec(numReplicas int32) *extensions.Deployment {
	return &extensions.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name: scaleTestBackendName,
		},
		Spec: extensions.DeploymentSpec{
			Replicas: &numReplicas,
			Selector: &metav1.LabelSelector{MatchLabels: scaleTestLabels},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: scaleTestLabels,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  scaleTestBackendName,
							Image: "gcr.io/google_containers/echoserver:1.6",
							Ports: []v1.ContainerPort{{ContainerPort: 8080}},
							ReadinessProbe: &v1.Probe{
								Handler: v1.Handler{
									HTTPGet: &v1.HTTPGetAction{
										Port: intstr.FromInt(8080),
										Path: "/healthz",
									},
								},
								FailureThreshold: 10,
								PeriodSeconds:    1,
								SuccessThreshold: 1,
								TimeoutSeconds:   1,
							},
						},
					},
				},
			},
		},
	}
}