mirror of https://github.com/ceph/ceph-csi.git
vendor update for CSI 0.3.0

vendor/k8s.io/kubernetes/test/e2e/framework/BUILD (generated, vendored): 10 lines changed
@@ -45,7 +45,6 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/framework",
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/apps:go_default_library",
"//pkg/apis/batch:go_default_library",
@@ -61,7 +60,6 @@ go_library(
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubectl:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
@@ -73,7 +71,8 @@ go_library(
"//pkg/kubemark:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/schedulercache:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
"//pkg/ssh:go_default_library",
"//pkg/util/file:go_default_library",
@@ -116,10 +115,8 @@ go_library(
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
@@ -144,11 +141,12 @@ go_library(
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/restmapper:go_default_library",
"//vendor/k8s.io/client-go/scale:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",

vendor/k8s.io/kubernetes/test/e2e/framework/crd_util.go (generated, vendored): 16 lines changed
@@ -23,6 +23,7 @@ import (
crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
)

@@ -66,18 +67,23 @@ func CreateTestCRD(f *Framework) (*TestCrd, error) {
Failf("failed to initialize apiExtensionClient: %v", err)
return nil, err
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
Failf("failed to initialize dynamic client: %v", err)
return nil, err
}

crd := newCRDForTest(testcrd)

//create CRD and waits for the resource to be recognized and available.
dynamicClient, err := testserver.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient, f.ClientPool)
crd, err = testserver.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)
if err != nil {
Failf("failed to create CustomResourceDefinition: %v", err)
return nil, err
}
resourceClient := dynamicClient.Resource(&metav1.APIResource{
Name: crd.Spec.Names.Plural,
Namespaced: true,
}, f.Namespace.Name)

gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: crd.Spec.Version, Resource: crd.Spec.Names.Plural}
resourceClient := dynamicClient.Resource(gvr).Namespace(f.Namespace.Name)

testcrd.ApiExtensionClient = apiExtensionClient
testcrd.Crd = crd
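
The hunk above swaps the removed dynamic.ClientPool for a GroupVersionResource-based dynamic client. Below is a minimal sketch of that pattern, assuming a populated rest.Config and the client-go version vendored here (newer releases add a context argument to List); the function and variable names are illustrative, not part of the diff.

package e2esketch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
)

// listCustomObjects shows the new dynamic-client pattern used in the hunk above:
// build a GroupVersionResource from the CRD spec and scope the client to a namespace.
func listCustomObjects(config *rest.Config, group, version, plural, namespace string) error {
	dynamicClient, err := dynamic.NewForConfig(config)
	if err != nil {
		return err
	}
	gvr := schema.GroupVersionResource{Group: group, Version: version, Resource: plural}
	resourceClient := dynamicClient.Resource(gvr).Namespace(namespace)
	// Listing is enough to prove the resource is served; callers would inspect the result.
	_, err = resourceClient.List(metav1.ListOptions{})
	return err
}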

vendor/k8s.io/kubernetes/test/e2e/framework/deployment_util.go (generated, vendored): 77 lines changed
@ -22,37 +22,36 @@ import (
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
scaleclient "k8s.io/client-go/scale"
|
||||
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
|
||||
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*extensions.Deployment, error) {
|
||||
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*apps.Deployment, error) {
|
||||
return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)
|
||||
}
|
||||
|
||||
// Waits for the deployment to clean up old rcs.
|
||||
func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
|
||||
var oldRSs []*extensions.ReplicaSet
|
||||
var d *extensions.Deployment
|
||||
var oldRSs []*apps.ReplicaSet
|
||||
var d *apps.Deployment
|
||||
|
||||
pollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
d = deployment
|
||||
|
||||
_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.ExtensionsV1beta1())
|
||||
_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -65,7 +64,7 @@ func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string
|
||||
return pollErr
|
||||
}
|
||||
|
||||
func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {
|
||||
func logReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) {
|
||||
testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, Logf)
|
||||
}
|
||||
|
||||
@ -73,7 +72,7 @@ func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string,
|
||||
return testutils.WaitForObservedDeployment(c, ns, deploymentName, desiredGeneration)
|
||||
}
|
||||
|
||||
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType extensions.DeploymentConditionType) error {
|
||||
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType apps.DeploymentConditionType) error {
|
||||
return testutils.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, Logf, Poll, pollLongTimeout)
|
||||
}
|
||||
|
||||
@ -84,16 +83,17 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
|
||||
return testutils.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, Logf, Poll, pollLongTimeout)
|
||||
}
|
||||
|
||||
func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType extensions.DeploymentStrategyType) *extensions.Deployment {
|
||||
func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType apps.DeploymentStrategyType) *apps.Deployment {
|
||||
zero := int64(0)
|
||||
return &extensions.Deployment{
|
||||
return &apps.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentName,
|
||||
Name: deploymentName,
|
||||
Labels: podLabels,
|
||||
},
|
||||
Spec: extensions.DeploymentSpec{
|
||||
Spec: apps.DeploymentSpec{
|
||||
Replicas: &replicas,
|
||||
Selector: &metav1.LabelSelector{MatchLabels: podLabels},
|
||||
Strategy: extensions.DeploymentStrategy{
|
||||
Strategy: apps.DeploymentStrategy{
|
||||
Type: strategyType,
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
@ -117,13 +117,13 @@ func NewDeployment(deploymentName string, replicas int32, podLabels map[string]s
|
||||
// Waits for the deployment to complete, and don't check if rolling update strategy is broken.
|
||||
// Rolling update strategy is used only during a rolling update, and can be violated in other situations,
|
||||
// such as shortly after a scaling event or the deployment is just created.
|
||||
func WaitForDeploymentComplete(c clientset.Interface, d *extensions.Deployment) error {
|
||||
func WaitForDeploymentComplete(c clientset.Interface, d *apps.Deployment) error {
|
||||
return testutils.WaitForDeploymentComplete(c, d, Logf, Poll, pollLongTimeout)
|
||||
}
|
||||
|
||||
// Waits for the deployment to complete, and check rolling update strategy isn't broken at any times.
|
||||
// Rolling update strategy should not be broken during a rolling update.
|
||||
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *extensions.Deployment) error {
|
||||
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *apps.Deployment) error {
|
||||
return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, Logf, Poll, pollLongTimeout)
|
||||
}
|
||||
|
||||
@ -140,12 +140,12 @@ func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName
|
||||
|
||||
// WatchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time with
|
||||
// old pods.
|
||||
func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) error {
|
||||
if d.Spec.Strategy.Type != extensions.RecreateDeploymentStrategyType {
|
||||
func WatchRecreateDeployment(c clientset.Interface, d *apps.Deployment) error {
|
||||
if d.Spec.Strategy.Type != apps.RecreateDeploymentStrategyType {
|
||||
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
|
||||
}
|
||||
|
||||
w, err := c.ExtensionsV1beta1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
|
||||
w, err := c.AppsV1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -153,12 +153,12 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er
|
||||
status := d.Status
|
||||
|
||||
condition := func(event watch.Event) (bool, error) {
|
||||
d := event.Object.(*extensions.Deployment)
|
||||
d := event.Object.(*apps.Deployment)
|
||||
status = d.Status
|
||||
|
||||
if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
|
||||
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.ExtensionsV1beta1())
|
||||
newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.ExtensionsV1beta1())
|
||||
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.AppsV1())
|
||||
newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.AppsV1())
|
||||
if err == nil && nerr == nil {
|
||||
Logf("%+v", d)
|
||||
logReplicaSetsOfDeployment(d, allOldRSs, newRS)
|
||||
@ -179,8 +179,8 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er
|
||||
return err
|
||||
}
|
||||
|
||||
func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
|
||||
return ScaleResource(clientset, internalClientset, scalesGetter, ns, name, size, wait, extensionsinternal.Kind("Deployment"), extensionsinternal.Resource("deployments"))
|
||||
func ScaleDeployment(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
|
||||
return ScaleResource(clientset, scalesGetter, ns, name, size, wait, appsinternal.Kind("Deployment"), appsinternal.Resource("deployments"))
|
||||
}
|
||||
|
||||
func RunDeployment(config testutils.DeploymentConfig) error {
|
||||
@ -190,13 +190,13 @@ func RunDeployment(config testutils.DeploymentConfig) error {
|
||||
return testutils.RunDeployment(config)
|
||||
}
|
||||
|
||||
func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) {
|
||||
func logPodsOfDeployment(c clientset.Interface, deployment *apps.Deployment, rsList []*apps.ReplicaSet) {
|
||||
testutils.LogPodsOfDeployment(c, deployment, rsList, Logf)
|
||||
}
|
||||
|
||||
func WaitForDeploymentRevision(c clientset.Interface, d *extensions.Deployment, targetRevision string) error {
|
||||
func WaitForDeploymentRevision(c clientset.Interface, d *apps.Deployment, targetRevision string) error {
|
||||
err := wait.PollImmediate(Poll, pollLongTimeout, func() (bool, error) {
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
|
||||
deployment, err := c.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -214,9 +214,9 @@ func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName,
|
||||
return testutils.CheckDeploymentRevisionAndImage(c, ns, deploymentName, revision, image)
|
||||
}
|
||||
|
||||
func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*extensions.Deployment, error) {
|
||||
func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*apps.Deployment, error) {
|
||||
deploymentSpec := MakeDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command)
|
||||
deployment, err := client.ExtensionsV1beta1().Deployments(namespace).Create(deploymentSpec)
|
||||
deployment, err := client.AppsV1().Deployments(namespace).Create(deploymentSpec)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err)
|
||||
}
|
||||
@ -230,19 +230,22 @@ func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[
|
||||
|
||||
// MakeDeployment creates a deployment definition based on the namespace. The deployment references the PVC's
|
||||
// name. A slice of BASH commands can be supplied as args to be run by the pod
|
||||
func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *extensions.Deployment {
|
||||
func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *apps.Deployment {
|
||||
if len(command) == 0 {
|
||||
command = "while true; do sleep 1; done"
|
||||
command = "trap exit TERM; while true; do sleep 1; done"
|
||||
}
|
||||
zero := int64(0)
|
||||
deploymentName := "deployment-" + string(uuid.NewUUID())
|
||||
deploymentSpec := &extensions.Deployment{
|
||||
deploymentSpec := &apps.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: extensions.DeploymentSpec{
|
||||
Spec: apps.DeploymentSpec{
|
||||
Replicas: &replicas,
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: podLabels,
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: podLabels,
|
||||
@ -281,8 +284,8 @@ func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector ma
|
||||
}
|
||||
|
||||
// GetPodsForDeployment gets pods for the given deployment
|
||||
func GetPodsForDeployment(client clientset.Interface, deployment *extensions.Deployment) (*v1.PodList, error) {
|
||||
replicaSet, err := deploymentutil.GetNewReplicaSet(deployment, client.ExtensionsV1beta1())
|
||||
func GetPodsForDeployment(client clientset.Interface, deployment *apps.Deployment) (*v1.PodList, error) {
|
||||
replicaSet, err := deploymentutil.GetNewReplicaSet(deployment, client.AppsV1())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to get new replica set for deployment %q: %v", deployment.Name, err)
|
||||
}
|
||||
@ -292,7 +295,7 @@ func GetPodsForDeployment(client clientset.Interface, deployment *extensions.Dep
|
||||
podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
|
||||
return client.CoreV1().Pods(namespace).List(options)
|
||||
}
|
||||
rsList := []*extensions.ReplicaSet{replicaSet}
|
||||
rsList := []*apps.ReplicaSet{replicaSet}
|
||||
podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to list Pods of Deployment %q: %v", deployment.Name, err)
|
||||
|
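
The changes in deployment_util.go above move the helpers from extensions/v1beta1 to apps/v1 (types, clients, and deploymentutil calls alike). Below is a minimal sketch of the resulting call shape, assuming the apps/v1 API and the client-go version vendored here (newer client-go adds a context argument to Get); the helper names are illustrative.

package e2esketch

import (
	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// getDeploymentAppsV1 mirrors the c.ExtensionsV1beta1() -> c.AppsV1() switch made above:
// the same helper logic, but returning the apps/v1 Deployment type.
func getDeploymentAppsV1(c clientset.Interface, ns, name string) (*apps.Deployment, error) {
	return c.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{})
}

// isRecreate shows the strategy-type comparison against the matching apps/v1 constant.
func isRecreate(d *apps.Deployment) bool {
	return d.Spec.Strategy.Type == apps.RecreateDeploymentStrategyType
}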

vendor/k8s.io/kubernetes/test/e2e/framework/framework.go (generated, vendored): 28 lines changed
@@ -28,7 +28,6 @@ import (

"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -40,6 +39,7 @@ import (
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
scaleclient "k8s.io/client-go/scale"
"k8s.io/client-go/tools/clientcmd"
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
@@ -70,7 +70,7 @@ type Framework struct {

InternalClientset *internalclientset.Clientset
AggregatorClient *aggregatorclient.Clientset
ClientPool dynamic.ClientPool
DynamicClient dynamic.Interface

ScalesGetter scaleclient.ScalesGetter

@@ -167,7 +167,8 @@ func (f *Framework) BeforeEach() {
Expect(err).NotTo(HaveOccurred())
f.AggregatorClient, err = aggregatorclient.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
f.ClientPool = dynamic.NewClientPool(config, legacyscheme.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
f.DynamicClient, err = dynamic.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())

// create scales getter, set GroupVersion and NegotiatedSerializer to default values
// as they are required when creating a REST client.
@@ -182,7 +183,7 @@ func (f *Framework) BeforeEach() {
discoClient, err := discovery.NewDiscoveryClientForConfig(config)
Expect(err).NotTo(HaveOccurred())
cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient)
restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscoClient, meta.InterfacesForUnstructured)
restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient)
restMapper.Reset()
resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)
@@ -288,7 +289,7 @@ func (f *Framework) AfterEach() {
if f.NamespaceDeletionTimeout != 0 {
timeout = f.NamespaceDeletionTimeout
}
if err := deleteNS(f.ClientSet, f.ClientPool, ns.Name, timeout); err != nil {
if err := deleteNS(f.ClientSet, f.DynamicClient, ns.Name, timeout); err != nil {
if !apierrors.IsNotFound(err) {
nsDeletionErrors[ns.Name] = err
} else {
@@ -399,10 +400,7 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
ns, err := createTestingNS(baseName, f.ClientSet, labels)
// check ns instead of err to see if it's nil as we may
// fail to create serviceAccount in it.
// In this case, we should not forget to delete the namespace.
if ns != nil {
f.namespacesToDelete = append(f.namespacesToDelete, ns)
}
f.AddNamespacesToDelete(ns)

if err == nil && !f.SkipPrivilegedPSPBinding {
CreatePrivilegedPSPBinding(f, ns.Name)
@@ -411,6 +409,18 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
return ns, err
}

// AddNamespacesToDelete adds one or more namespaces to be deleted when the test
// completes.
func (f *Framework) AddNamespacesToDelete(namespaces ...*v1.Namespace) {
for _, ns := range namespaces {
if ns == nil {
continue
}
f.namespacesToDelete = append(f.namespacesToDelete, ns)

}
}

// WaitForPodTerminated waits for the pod to be terminated with the given reason.
func (f *Framework) WaitForPodTerminated(podName, reason string) error {
return waitForPodTerminatedInNamespace(f.ClientSet, podName, reason, f.Namespace.Name)
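
framework.go above replaces the deprecated dynamic.ClientPool with dynamic.Interface and builds its REST mapper from cached discovery via the new restmapper package. The sketch below mirrors that wiring, assuming a valid rest.Config and the package paths vendored here (the cached-discovery package has since moved in newer client-go); names are illustrative.

package e2esketch

import (
	"k8s.io/client-go/discovery"
	cacheddiscovery "k8s.io/client-go/discovery/cached"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/restmapper"
	scaleclient "k8s.io/client-go/scale"
)

// newScalesGetter mirrors the BeforeEach wiring above: one cached discovery client
// feeds both the deferred REST mapper and the scale-kind resolver.
func newScalesGetter(config *rest.Config, restClient rest.Interface) (scaleclient.ScalesGetter, error) {
	discoClient, err := discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		return nil, err
	}
	cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient)
	restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient)
	restMapper.Reset()
	resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
	return scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver), nil
}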

vendor/k8s.io/kubernetes/test/e2e/framework/get-kubemark-resource-usage.go (generated, vendored): 2 lines changed
@@ -39,7 +39,7 @@ func getMasterUsageByPrefix(prefix string) (string, error) {
// TODO: figure out how to move this to kubemark directory (need to factor test SSH out of e2e framework)
func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsage {
result := make(map[string]*KubemarkResourceUsage)
// Get kuberenetes component resource usage
// Get kubernetes component resource usage
sshResult, err := getMasterUsageByPrefix("kube")
if err != nil {
Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)

vendor/k8s.io/kubernetes/test/e2e/framework/google_compute.go (generated, vendored): 56 lines changed
@@ -35,8 +35,12 @@ func lookupClusterImageSources() (string, string, error) {
gcloudf := func(argv ...string) ([]string, error) {
args := []string{"compute"}
args = append(args, argv...)
args = append(args, "--project", TestContext.CloudConfig.ProjectID,
"--zone", TestContext.CloudConfig.Zone)
args = append(args, "--project", TestContext.CloudConfig.ProjectID)
if TestContext.CloudConfig.MultiMaster {
args = append(args, "--region", TestContext.CloudConfig.Region)
} else {
args = append(args, "--zone", TestContext.CloudConfig.Zone)
}
outputBytes, err := exec.Command("gcloud", args...).CombinedOutput()
str := strings.Replace(string(outputBytes), ",", "\n", -1)
str = strings.Replace(str, ";", "\n", -1)
@@ -141,6 +145,28 @@ func CreateManagedInstanceGroup(size int64, zone, template string) error {
return nil
}

func GetManagedInstanceGroupTemplateName(zone string) (string, error) {
// TODO(verult): make this hit the compute API directly instead of
// shelling out to gcloud. Use InstanceGroupManager to get Instance Template name.

stdout, _, err := retryCmd("gcloud", "compute", "instance-groups", "managed",
"list",
fmt.Sprintf("--filter=name:%s", TestContext.CloudConfig.NodeInstanceGroup),
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
fmt.Sprintf("--zones=%s", zone),
)

if err != nil {
return "", fmt.Errorf("gcloud compute instance-groups managed list call failed with err: %v", err)
}

templateName, err := parseInstanceTemplateName(stdout)
if err != nil {
return "", fmt.Errorf("error parsing gcloud output: %v", err)
}
return templateName, nil
}

func DeleteManagedInstanceGroup(zone string) error {
// TODO(verult): make this hit the compute API directly instead of
// shelling out to gcloud.
@@ -154,3 +180,29 @@ func DeleteManagedInstanceGroup(zone string) error {
}
return nil
}

func parseInstanceTemplateName(gcloudOutput string) (string, error) {
const templateNameField = "INSTANCE_TEMPLATE"

lines := strings.Split(gcloudOutput, "\n")
if len(lines) <= 1 { // Empty output or only contains column names
return "", fmt.Errorf("the list is empty")
}

// Otherwise, there should be exactly 1 entry, i.e. 2 lines
fieldNames := strings.Fields(lines[0])
instanceTemplateColumn := 0
for instanceTemplateColumn < len(fieldNames) &&
fieldNames[instanceTemplateColumn] != templateNameField {
instanceTemplateColumn++
}

if instanceTemplateColumn == len(fieldNames) {
return "", fmt.Errorf("the list does not contain instance template information")
}

fields := strings.Fields(lines[1])
instanceTemplateName := fields[instanceTemplateColumn]

return instanceTemplateName, nil
}
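
parseInstanceTemplateName above locates the INSTANCE_TEMPLATE column in gcloud's whitespace-separated table output. Here is a small standalone illustration of the same column-scan technique; the sample listing is fabricated for the example.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Fabricated two-line gcloud-style listing: a header row plus a single entry.
	sample := "NAME  LOCATION  SCOPE  BASE_INSTANCE_NAME  SIZE  TARGET_SIZE  INSTANCE_TEMPLATE  AUTOSCALED\n" +
		"my-mig  us-central1-b  zone  my-mig  3  3  my-template-v2  no"

	lines := strings.Split(sample, "\n")
	header := strings.Fields(lines[0])

	// Walk the header row until the wanted column is found, as parseInstanceTemplateName does.
	col := -1
	for i, name := range header {
		if name == "INSTANCE_TEMPLATE" {
			col = i
			break
		}
	}
	if col < 0 {
		fmt.Println("no INSTANCE_TEMPLATE column in output")
		return
	}
	fmt.Println(strings.Fields(lines[1])[col]) // prints: my-template-v2
}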

vendor/k8s.io/kubernetes/test/e2e/framework/ingress_utils.go (generated, vendored): 266 lines changed
@ -20,6 +20,7 @@ import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
@ -119,6 +120,16 @@ const (
|
||||
// GCE only allows names < 64 characters, and the loadbalancer controller inserts
|
||||
// a single character of padding.
|
||||
nameLenLimit = 62
|
||||
|
||||
NEGAnnotation = "alpha.cloud.google.com/load-balancer-neg"
|
||||
NEGUpdateTimeout = 2 * time.Minute
|
||||
|
||||
InstanceGroupAnnotation = "ingress.gcp.kubernetes.io/instance-groups"
|
||||
|
||||
// Prefix for annotation keys used by the ingress controller to specify the
|
||||
// names of GCP resources such as forwarding rules, url maps, target proxies, etc
|
||||
// that it created for the corresponding ingress.
|
||||
StatusPrefix = "ingress.kubernetes.io"
|
||||
)
|
||||
|
||||
type TestLogger interface {
|
||||
@ -165,7 +176,7 @@ func CreateIngressComformanceTests(jig *IngressTestJig, ns string, annotations m
|
||||
updateURLMapHost := "bar.baz.com"
|
||||
updateURLMapPath := "/testurl"
|
||||
// Platform agnostic list of tests that must be satisfied by all controllers
|
||||
return []IngressConformanceTests{
|
||||
tests := []IngressConformanceTests{
|
||||
{
|
||||
fmt.Sprintf("should create a basic HTTP ingress"),
|
||||
func() { jig.CreateIngress(manifestPath, ns, annotations, annotations) },
|
||||
@ -173,30 +184,9 @@ func CreateIngressComformanceTests(jig *IngressTestJig, ns string, annotations m
|
||||
},
|
||||
{
|
||||
fmt.Sprintf("should terminate TLS for host %v", tlsHost),
|
||||
func() { jig.AddHTTPS(tlsSecretName, tlsHost) },
|
||||
func() { jig.SetHTTPS(tlsSecretName, tlsHost) },
|
||||
fmt.Sprintf("waiting for HTTPS updates to reflect in ingress"),
|
||||
},
|
||||
{
|
||||
fmt.Sprintf("should update SSL certificate with modified hostname %v", updatedTLSHost),
|
||||
func() {
|
||||
jig.Update(func(ing *extensions.Ingress) {
|
||||
newRules := []extensions.IngressRule{}
|
||||
for _, rule := range ing.Spec.Rules {
|
||||
if rule.Host != tlsHost {
|
||||
newRules = append(newRules, rule)
|
||||
continue
|
||||
}
|
||||
newRules = append(newRules, extensions.IngressRule{
|
||||
Host: updatedTLSHost,
|
||||
IngressRuleValue: rule.IngressRuleValue,
|
||||
})
|
||||
}
|
||||
ing.Spec.Rules = newRules
|
||||
})
|
||||
jig.AddHTTPS(tlsSecretName, updatedTLSHost)
|
||||
},
|
||||
fmt.Sprintf("Waiting for updated certificates to accept requests for host %v", updatedTLSHost),
|
||||
},
|
||||
{
|
||||
fmt.Sprintf("should update url map for host %v to expose a single url: %v", updateURLMapHost, updateURLMapPath),
|
||||
func() {
|
||||
@ -233,6 +223,31 @@ func CreateIngressComformanceTests(jig *IngressTestJig, ns string, annotations m
|
||||
fmt.Sprintf("Waiting for path updates to reflect in L7"),
|
||||
},
|
||||
}
|
||||
// Skip the Update TLS cert test for kubemci: https://github.com/GoogleCloudPlatform/k8s-multicluster-ingress/issues/141.
|
||||
if jig.Class != MulticlusterIngressClassValue {
|
||||
tests = append(tests, IngressConformanceTests{
|
||||
fmt.Sprintf("should update SSL certificate with modified hostname %v", updatedTLSHost),
|
||||
func() {
|
||||
jig.Update(func(ing *extensions.Ingress) {
|
||||
newRules := []extensions.IngressRule{}
|
||||
for _, rule := range ing.Spec.Rules {
|
||||
if rule.Host != tlsHost {
|
||||
newRules = append(newRules, rule)
|
||||
continue
|
||||
}
|
||||
newRules = append(newRules, extensions.IngressRule{
|
||||
Host: updatedTLSHost,
|
||||
IngressRuleValue: rule.IngressRuleValue,
|
||||
})
|
||||
}
|
||||
ing.Spec.Rules = newRules
|
||||
})
|
||||
jig.SetHTTPS(tlsSecretName, updatedTLSHost)
|
||||
},
|
||||
fmt.Sprintf("Waiting for updated certificates to accept requests for host %v", updatedTLSHost),
|
||||
})
|
||||
}
|
||||
return tests
|
||||
}
|
||||
|
||||
// GenerateRSACerts generates a basic self signed certificate using a key length
|
||||
@ -850,14 +865,24 @@ func (cont *GCEIngressController) GetFirewallRuleName() string {
|
||||
}
|
||||
|
||||
// GetFirewallRule returns the firewall used by the GCEIngressController.
|
||||
// Causes a fatal error incase of an error.
|
||||
// TODO: Rename this to GetFirewallRuleOrDie and similarly rename all other
|
||||
// methods here to be consistent with rest of the code in this repo.
|
||||
func (cont *GCEIngressController) GetFirewallRule() *compute.Firewall {
|
||||
gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud)
|
||||
fwName := cont.GetFirewallRuleName()
|
||||
fw, err := gceCloud.GetFirewall(fwName)
|
||||
fw, err := cont.GetFirewallRuleOrError()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return fw
|
||||
}
|
||||
|
||||
// GetFirewallRule returns the firewall used by the GCEIngressController.
|
||||
// Returns an error if that fails.
|
||||
// TODO: Rename this to GetFirewallRule when the above method with that name is renamed.
|
||||
func (cont *GCEIngressController) GetFirewallRuleOrError() (*compute.Firewall, error) {
|
||||
gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud)
|
||||
fwName := cont.GetFirewallRuleName()
|
||||
return gceCloud.GetFirewall(fwName)
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) deleteFirewallRule(del bool) (msg string) {
|
||||
fwList := []compute.Firewall{}
|
||||
regex := fmt.Sprintf("%vfw-l7%v.*", k8sPrefix, clusterDelimiter)
|
||||
@ -883,41 +908,72 @@ func (cont *GCEIngressController) isHTTPErrorCode(err error, code int) bool {
|
||||
}
|
||||
|
||||
// BackendServiceUsingNEG returns true only if all global backend service with matching nodeports pointing to NEG as backend
|
||||
func (cont *GCEIngressController) BackendServiceUsingNEG(nodeports []string) (bool, error) {
|
||||
return cont.backendMode(nodeports, "networkEndpointGroups")
|
||||
func (cont *GCEIngressController) BackendServiceUsingNEG(svcPorts map[string]v1.ServicePort) (bool, error) {
|
||||
return cont.backendMode(svcPorts, "networkEndpointGroups")
|
||||
}
|
||||
|
||||
// BackendServiceUsingIG returns true only if all global backend service with matching nodeports pointing to IG as backend
|
||||
func (cont *GCEIngressController) BackendServiceUsingIG(nodeports []string) (bool, error) {
|
||||
return cont.backendMode(nodeports, "instanceGroups")
|
||||
// BackendServiceUsingIG returns true only if all global backend service with matching svcPorts pointing to IG as backend
|
||||
func (cont *GCEIngressController) BackendServiceUsingIG(svcPorts map[string]v1.ServicePort) (bool, error) {
|
||||
return cont.backendMode(svcPorts, "instanceGroups")
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) backendMode(nodeports []string, keyword string) (bool, error) {
|
||||
func (cont *GCEIngressController) backendMode(svcPorts map[string]v1.ServicePort, keyword string) (bool, error) {
|
||||
gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud)
|
||||
beList, err := gceCloud.ListGlobalBackendServices()
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to list backend services: %v", err)
|
||||
}
|
||||
|
||||
hcList, err := gceCloud.ListHealthChecks()
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to list health checks: %v", err)
|
||||
}
|
||||
|
||||
uid := cont.UID
|
||||
if len(uid) > 8 {
|
||||
uid = uid[:8]
|
||||
}
|
||||
|
||||
matchingBackendService := 0
|
||||
for _, bs := range beList {
|
||||
for svcName, sp := range svcPorts {
|
||||
match := false
|
||||
for _, np := range nodeports {
|
||||
// Warning: This assumes backend service naming convention includes nodeport in the name
|
||||
if strings.Contains(bs.Name, np) {
|
||||
bsMatch := &compute.BackendService{}
|
||||
// Non-NEG BackendServices are named with the Nodeport in the name.
|
||||
// NEG BackendServices' names contain the a sha256 hash of a string.
|
||||
negString := strings.Join([]string{uid, cont.Ns, svcName, sp.TargetPort.String()}, ";")
|
||||
negHash := fmt.Sprintf("%x", sha256.Sum256([]byte(negString)))[:8]
|
||||
for _, bs := range beList {
|
||||
if strings.Contains(bs.Name, strconv.Itoa(int(sp.NodePort))) ||
|
||||
strings.Contains(bs.Name, negHash) {
|
||||
match = true
|
||||
bsMatch = bs
|
||||
matchingBackendService += 1
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if match {
|
||||
for _, be := range bs.Backends {
|
||||
for _, be := range bsMatch.Backends {
|
||||
if !strings.Contains(be.Group, keyword) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Check that the correct HealthCheck exists for the BackendService
|
||||
hcMatch := false
|
||||
for _, hc := range hcList {
|
||||
if hc.Name == bsMatch.Name {
|
||||
hcMatch = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !hcMatch {
|
||||
return false, fmt.Errorf("missing healthcheck for backendservice: %v", bsMatch.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return matchingBackendService == len(nodeports), nil
|
||||
return matchingBackendService == len(svcPorts), nil
|
||||
}
|
||||
|
||||
// Cleanup cleans up cloud resources.
|
||||
@ -1131,7 +1187,7 @@ func (j *IngressTestJig) CreateIngress(manifestPath, ns string, ingAnnotations m
|
||||
for k, v := range ingAnnotations {
|
||||
j.Ingress.Annotations[k] = v
|
||||
}
|
||||
j.Logger.Infof(fmt.Sprintf("creating" + j.Ingress.Name + " ingress"))
|
||||
j.Logger.Infof(fmt.Sprintf("creating " + j.Ingress.Name + " ingress"))
|
||||
j.Ingress, err = j.runCreate(j.Ingress)
|
||||
ExpectNoError(err)
|
||||
}
|
||||
@ -1146,7 +1202,7 @@ func (j *IngressTestJig) runCreate(ing *extensions.Ingress) (*extensions.Ingress
|
||||
if err := manifest.IngressToManifest(ing, filePath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err := runKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath))
|
||||
_, err := RunKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath))
|
||||
return ing, err
|
||||
}
|
||||
|
||||
@ -1161,7 +1217,7 @@ func (j *IngressTestJig) runUpdate(ing *extensions.Ingress) (*extensions.Ingress
|
||||
if err := manifest.IngressToManifest(ing, filePath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err := runKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath), "--force")
|
||||
_, err := RunKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath), "--force")
|
||||
return ing, err
|
||||
}
|
||||
|
||||
@ -1172,7 +1228,7 @@ func (j *IngressTestJig) Update(update func(ing *extensions.Ingress)) {
|
||||
for i := 0; i < 3; i++ {
|
||||
j.Ingress, err = j.Client.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
Failf("failed to get ingress %q: %v", name, err)
|
||||
Failf("failed to get ingress %s/%s: %v", ns, name, err)
|
||||
}
|
||||
update(j.Ingress)
|
||||
j.Ingress, err = j.runUpdate(j.Ingress)
|
||||
@ -1181,24 +1237,50 @@ func (j *IngressTestJig) Update(update func(ing *extensions.Ingress)) {
|
||||
return
|
||||
}
|
||||
if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
|
||||
Failf("failed to update ingress %q: %v", name, err)
|
||||
Failf("failed to update ingress %s/%s: %v", ns, name, err)
|
||||
}
|
||||
}
|
||||
Failf("too many retries updating ingress %q", name)
|
||||
Failf("too many retries updating ingress %s/%s", ns, name)
|
||||
}
|
||||
|
||||
// AddHTTPS updates the ingress to use this secret for these hosts.
|
||||
// AddHTTPS updates the ingress to add this secret for these hosts.
|
||||
func (j *IngressTestJig) AddHTTPS(secretName string, hosts ...string) {
|
||||
j.Ingress.Spec.TLS = []extensions.IngressTLS{{Hosts: hosts, SecretName: secretName}}
|
||||
// TODO: Just create the secret in GetRootCAs once we're watching secrets in
|
||||
// the ingress controller.
|
||||
_, cert, _, err := createTLSSecret(j.Client, j.Ingress.Namespace, secretName, hosts...)
|
||||
ExpectNoError(err)
|
||||
j.Logger.Infof("Updating ingress %v to use secret %v for TLS termination", j.Ingress.Name, secretName)
|
||||
j.Logger.Infof("Updating ingress %v to also use secret %v for TLS termination", j.Ingress.Name, secretName)
|
||||
j.Update(func(ing *extensions.Ingress) {
|
||||
ing.Spec.TLS = append(ing.Spec.TLS, extensions.IngressTLS{Hosts: hosts, SecretName: secretName})
|
||||
})
|
||||
j.RootCAs[secretName] = cert
|
||||
}
|
||||
|
||||
// SetHTTPS updates the ingress to use only this secret for these hosts.
|
||||
func (j *IngressTestJig) SetHTTPS(secretName string, hosts ...string) {
|
||||
_, cert, _, err := createTLSSecret(j.Client, j.Ingress.Namespace, secretName, hosts...)
|
||||
ExpectNoError(err)
|
||||
j.Logger.Infof("Updating ingress %v to only use secret %v for TLS termination", j.Ingress.Name, secretName)
|
||||
j.Update(func(ing *extensions.Ingress) {
|
||||
ing.Spec.TLS = []extensions.IngressTLS{{Hosts: hosts, SecretName: secretName}}
|
||||
})
|
||||
j.RootCAs[secretName] = cert
|
||||
j.RootCAs = map[string][]byte{secretName: cert}
|
||||
}
|
||||
|
||||
// RemoveHTTPS updates the ingress to not use this secret for TLS.
|
||||
// Note: Does not delete the secret.
|
||||
func (j *IngressTestJig) RemoveHTTPS(secretName string) {
|
||||
newTLS := []extensions.IngressTLS{}
|
||||
for _, ingressTLS := range j.Ingress.Spec.TLS {
|
||||
if secretName != ingressTLS.SecretName {
|
||||
newTLS = append(newTLS, ingressTLS)
|
||||
}
|
||||
}
|
||||
j.Logger.Infof("Updating ingress %v to not use secret %v for TLS termination", j.Ingress.Name, secretName)
|
||||
j.Update(func(ing *extensions.Ingress) {
|
||||
ing.Spec.TLS = newTLS
|
||||
})
|
||||
delete(j.RootCAs, secretName)
|
||||
}
|
||||
|
||||
// PrepareTLSSecret creates a TLS secret and caches the cert.
|
||||
@ -1227,7 +1309,7 @@ func (j *IngressTestJig) TryDeleteIngress() {
|
||||
}
|
||||
|
||||
func (j *IngressTestJig) TryDeleteGivenIngress(ing *extensions.Ingress) {
|
||||
if err := j.runDelete(ing, j.Class); err != nil {
|
||||
if err := j.runDelete(ing); err != nil {
|
||||
j.Logger.Infof("Error while deleting the ingress %v/%v with class %s: %v", ing.Namespace, ing.Name, j.Class, err)
|
||||
}
|
||||
}
|
||||
@ -1240,7 +1322,7 @@ func (j *IngressTestJig) TryDeleteGivenService(svc *v1.Service) {
|
||||
}
|
||||
|
||||
// runDelete runs the required command to delete the given ingress.
|
||||
func (j *IngressTestJig) runDelete(ing *extensions.Ingress, class string) error {
|
||||
func (j *IngressTestJig) runDelete(ing *extensions.Ingress) error {
|
||||
if j.Class != MulticlusterIngressClassValue {
|
||||
return j.Client.ExtensionsV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil)
|
||||
}
|
||||
@ -1249,7 +1331,7 @@ func (j *IngressTestJig) runDelete(ing *extensions.Ingress, class string) error
|
||||
if err := manifest.IngressToManifest(ing, filePath); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err := runKubemciWithKubeconfig("delete", ing.Name, fmt.Sprintf("--ingress=%s", filePath))
|
||||
_, err := RunKubemciWithKubeconfig("delete", ing.Name, fmt.Sprintf("--ingress=%s", filePath))
|
||||
return err
|
||||
}
|
||||
|
||||
@ -1257,7 +1339,7 @@ func (j *IngressTestJig) runDelete(ing *extensions.Ingress, class string) error
|
||||
// TODO(nikhiljindal): Update this to be able to return hostname as well.
|
||||
func getIngressAddressFromKubemci(name string) ([]string, error) {
|
||||
var addresses []string
|
||||
out, err := runKubemciCmd("get-status", name)
|
||||
out, err := RunKubemciCmd("get-status", name)
|
||||
if err != nil {
|
||||
return addresses, err
|
||||
}
|
||||
@ -1304,13 +1386,14 @@ func (j *IngressTestJig) WaitForIngressAddress(c clientset.Interface, ns, ingNam
|
||||
err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
|
||||
ipOrNameList, err := getIngressAddress(c, ns, ingName, j.Class)
|
||||
if err != nil || len(ipOrNameList) == 0 {
|
||||
j.Logger.Errorf("Waiting for Ingress %v to acquire IP, error %v", ingName, err)
|
||||
if IsRetryableAPIError(err) {
|
||||
j.Logger.Errorf("Waiting for Ingress %s/%s to acquire IP, error: %v, ipOrNameList: %v", ns, ingName, err, ipOrNameList)
|
||||
if testutils.IsRetryableAPIError(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
address = ipOrNameList[0]
|
||||
j.Logger.Infof("Found address %s for ingress %s/%s", address, ns, ingName)
|
||||
return true, nil
|
||||
})
|
||||
return address, err
|
||||
@ -1333,7 +1416,9 @@ func (j *IngressTestJig) pollIngressWithCert(ing *extensions.Ingress, address st
|
||||
}
|
||||
for _, p := range rules.IngressRuleValue.HTTP.Paths {
|
||||
if waitForNodePort {
|
||||
if err := j.pollServiceNodePort(ing.Namespace, p.Backend.ServiceName, int(p.Backend.ServicePort.IntVal)); err != nil {
|
||||
nodePort := int(p.Backend.ServicePort.IntVal)
|
||||
if err := j.pollServiceNodePort(ing.Namespace, p.Backend.ServiceName, nodePort); err != nil {
|
||||
j.Logger.Infof("Error in waiting for nodeport %d on service %v/%v: %s", nodePort, ing.Namespace, p.Backend.ServiceName, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -1349,7 +1434,9 @@ func (j *IngressTestJig) pollIngressWithCert(ing *extensions.Ingress, address st
|
||||
}
|
||||
|
||||
func (j *IngressTestJig) WaitForIngress(waitForNodePort bool) {
|
||||
j.WaitForGivenIngressWithTimeout(j.Ingress, waitForNodePort, LoadBalancerPollTimeout)
|
||||
if err := j.WaitForGivenIngressWithTimeout(j.Ingress, waitForNodePort, LoadBalancerPollTimeout); err != nil {
|
||||
Failf("error in waiting for ingress to get an address: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// WaitForGivenIngressWithTimeout waits till the ingress acquires an IP,
|
||||
@ -1363,7 +1450,6 @@ func (j *IngressTestJig) WaitForGivenIngressWithTimeout(ing *extensions.Ingress,
|
||||
if err != nil {
|
||||
return fmt.Errorf("Ingress failed to acquire an IP address within %v", timeout)
|
||||
}
|
||||
j.Logger.Infof("Found address %v for ingress %v", address, ing.Name)
|
||||
|
||||
var knownHosts []string
|
||||
var cert []byte
|
||||
@ -1385,7 +1471,6 @@ func (j *IngressTestJig) WaitForIngressWithCert(waitForNodePort bool, knownHosts
|
||||
if err != nil {
|
||||
return fmt.Errorf("Ingress failed to acquire an IP address within %v", LoadBalancerPollTimeout)
|
||||
}
|
||||
j.Logger.Infof("Found address %v for ingress %v", address, j.Ingress.Name)
|
||||
|
||||
return j.pollIngressWithCert(j.Ingress, address, knownHosts, cert, waitForNodePort, LoadBalancerPollTimeout)
|
||||
}
|
||||
@ -1427,10 +1512,22 @@ func (j *IngressTestJig) GetDefaultBackendNodePort() (int32, error) {
|
||||
// by default, so retrieve its nodePort if includeDefaultBackend is true.
|
||||
func (j *IngressTestJig) GetIngressNodePorts(includeDefaultBackend bool) []string {
|
||||
nodePorts := []string{}
|
||||
svcPorts := j.GetServicePorts(includeDefaultBackend)
|
||||
for _, svcPort := range svcPorts {
|
||||
nodePorts = append(nodePorts, strconv.Itoa(int(svcPort.NodePort)))
|
||||
}
|
||||
return nodePorts
|
||||
}
|
||||
|
||||
// GetIngressNodePorts returns related backend services' svcPorts.
|
||||
// Current GCE ingress controller allows traffic to the default HTTP backend
|
||||
// by default, so retrieve its nodePort if includeDefaultBackend is true.
|
||||
func (j *IngressTestJig) GetServicePorts(includeDefaultBackend bool) map[string]v1.ServicePort {
|
||||
svcPorts := make(map[string]v1.ServicePort)
|
||||
if includeDefaultBackend {
|
||||
defaultSvc, err := j.Client.CoreV1().Services(metav1.NamespaceSystem).Get(defaultBackendName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
nodePorts = append(nodePorts, strconv.Itoa(int(defaultSvc.Spec.Ports[0].NodePort)))
|
||||
svcPorts[defaultBackendName] = defaultSvc.Spec.Ports[0]
|
||||
}
|
||||
|
||||
backendSvcs := []string{}
|
||||
@ -1445,9 +1542,9 @@ func (j *IngressTestJig) GetIngressNodePorts(includeDefaultBackend bool) []strin
|
||||
for _, svcName := range backendSvcs {
|
||||
svc, err := j.Client.CoreV1().Services(j.Ingress.Namespace).Get(svcName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
nodePorts = append(nodePorts, strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
|
||||
svcPorts[svcName] = svc.Spec.Ports[0]
|
||||
}
|
||||
return nodePorts
|
||||
return svcPorts
|
||||
}
|
||||
|
||||
// ConstructFirewallForIngress returns the expected GCE firewall rule for the ingress resource
|
||||
@ -1524,14 +1621,16 @@ func (cont *NginxIngressController) Init() {
|
||||
Logf("ingress controller running in pod %v on ip %v", cont.pod.Name, cont.externalIP)
|
||||
}
|
||||
|
||||
func GenerateReencryptionIngressSpec() *extensions.Ingress {
|
||||
func generateBacksideHTTPSIngressSpec(ns string) *extensions.Ingress {
|
||||
return &extensions.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "echoheaders-reencryption",
|
||||
Name: "echoheaders-https",
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: extensions.IngressSpec{
|
||||
// Note kubemci requres a default backend.
|
||||
Backend: &extensions.IngressBackend{
|
||||
ServiceName: "echoheaders-reencryption",
|
||||
ServiceName: "echoheaders-https",
|
||||
ServicePort: intstr.IntOrString{
|
||||
Type: intstr.Int,
|
||||
IntVal: 443,
|
||||
@ -1541,10 +1640,10 @@ func GenerateReencryptionIngressSpec() *extensions.Ingress {
|
||||
}
|
||||
}
|
||||
|
||||
func GenerateReencryptionServiceSpec() *v1.Service {
|
||||
func generateBacksideHTTPSServiceSpec() *v1.Service {
|
||||
return &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "echoheaders-reencryption",
|
||||
Name: "echoheaders-https",
|
||||
Annotations: map[string]string{
|
||||
ServiceApplicationProtocolKey: `{"my-https-port":"HTTPS"}`,
|
||||
},
|
||||
@ -1557,33 +1656,33 @@ func GenerateReencryptionServiceSpec() *v1.Service {
|
||||
TargetPort: intstr.FromString("echo-443"),
|
||||
}},
|
||||
Selector: map[string]string{
|
||||
"app": "echoheaders-reencryption",
|
||||
"app": "echoheaders-https",
|
||||
},
|
||||
Type: v1.ServiceTypeNodePort,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func GenerateReencryptionDeploymentSpec() *extensions.Deployment {
|
||||
func generateBacksideHTTPSDeploymentSpec() *extensions.Deployment {
|
||||
return &extensions.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "echoheaders-reencryption",
|
||||
Name: "echoheaders-https",
|
||||
},
|
||||
Spec: extensions.DeploymentSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{
|
||||
"app": "echoheaders-reencryption",
|
||||
"app": "echoheaders-https",
|
||||
}},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"app": "echoheaders-reencryption",
|
||||
"app": "echoheaders-https",
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "echoheaders-reencryption",
|
||||
Image: "k8s.gcr.io/echoserver:1.9",
|
||||
Name: "echoheaders-https",
|
||||
Image: "k8s.gcr.io/echoserver:1.10",
|
||||
Ports: []v1.ContainerPort{{
|
||||
ContainerPort: 8443,
|
||||
Name: "echo-443",
|
||||
@ -1596,26 +1695,35 @@ func GenerateReencryptionDeploymentSpec() *extensions.Deployment {
|
||||
}
|
||||
}
|
||||
|
||||
func CreateReencryptionIngress(cs clientset.Interface, namespace string) (*extensions.Deployment, *v1.Service, *extensions.Ingress, error) {
|
||||
deployCreated, err := cs.ExtensionsV1beta1().Deployments(namespace).Create(GenerateReencryptionDeploymentSpec())
|
||||
// SetUpBacksideHTTPSIngress sets up deployment, service and ingress with backside HTTPS configured.
|
||||
func (j *IngressTestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace string, staticIPName string) (*extensions.Deployment, *v1.Service, *extensions.Ingress, error) {
|
||||
deployCreated, err := cs.ExtensionsV1beta1().Deployments(namespace).Create(generateBacksideHTTPSDeploymentSpec())
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
svcCreated, err := cs.CoreV1().Services(namespace).Create(GenerateReencryptionServiceSpec())
|
||||
svcCreated, err := cs.CoreV1().Services(namespace).Create(generateBacksideHTTPSServiceSpec())
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
ingCreated, err := cs.ExtensionsV1beta1().Ingresses(namespace).Create(GenerateReencryptionIngressSpec())
|
||||
ingToCreate := generateBacksideHTTPSIngressSpec(namespace)
|
||||
if staticIPName != "" {
|
||||
if ingToCreate.Annotations == nil {
|
||||
ingToCreate.Annotations = map[string]string{}
|
||||
}
|
||||
ingToCreate.Annotations[IngressStaticIPKey] = staticIPName
|
||||
}
|
||||
ingCreated, err := j.runCreate(ingToCreate)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
return deployCreated, svcCreated, ingCreated, nil
|
||||
}
|
||||
|
||||
func CleanupReencryptionIngress(cs clientset.Interface, deploy *extensions.Deployment, svc *v1.Service, ing *extensions.Ingress) []error {
|
||||
// DeleteTestResource deletes given deployment, service and ingress.
|
||||
func (j *IngressTestJig) DeleteTestResource(cs clientset.Interface, deploy *extensions.Deployment, svc *v1.Service, ing *extensions.Ingress) []error {
|
||||
var errs []error
|
||||
if ing != nil {
|
||||
if err := cs.ExtensionsV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil); err != nil {
|
||||
if err := j.runDelete(ing); err != nil {
|
||||
errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err))
|
||||
}
|
||||
}
|
||||
|
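
backendMode above now matches NEG-style backend services by an 8-character prefix of the sha256 hash of "uid;namespace;serviceName;targetPort". A small runnable sketch of that check follows; the cluster UID, namespace, service, port, and the backend-service name format are all made up for illustration.

package main

import (
	"crypto/sha256"
	"fmt"
	"strings"
)

func main() {
	// All values below are made up: cluster UID (already truncated to 8 characters,
	// as the code above does), namespace, service name, and target port.
	uid := "a1b2c3d4"
	negString := strings.Join([]string{uid, "e2e-tests-ingress", "echoheaders-https", "8443"}, ";")
	negHash := fmt.Sprintf("%x", sha256.Sum256([]byte(negString)))[:8]

	// Illustrative backend-service name only; real names are produced by the ingress controller.
	backendServiceName := "k8s1-" + negHash + "-echoheaders-https-443"
	fmt.Println(negHash, strings.Contains(backendServiceName, negHash)) // "<hash> true"
}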

vendor/k8s.io/kubernetes/test/e2e/framework/jobs_util.go (generated, vendored): 11 lines changed
@@ -210,6 +210,17 @@ func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.D
})
}

// WaitForJobGone uses c to wait for up to timeout for the Job named jobName in namespace ns to be removed.
func WaitForJobGone(c clientset.Interface, ns, jobName string, timeout time.Duration) error {
return wait.Poll(Poll, timeout, func() (bool, error) {
_, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if errors.IsNotFound(err) {
return true, nil
}
return false, err
})
}

// CheckForAllJobPodsRunning uses c to check in the Job named jobName in ns is running. If the returned error is not
// nil the returned bool is true if the Job is running.
func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) (bool, error) {
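
WaitForJobGone above polls until a GET on the Job returns NotFound. Below is a short usage sketch, written as if it lived inside this framework package (so WaitForJobGone, clientset, metav1, and time are already in scope); the propagation policy, job name handling, and two-minute timeout are illustrative, and the Delete signature matches the client-go version vendored here (newer versions add a context argument).

// DeleteJobAndWaitForGone is a hypothetical helper showing the intended use of
// WaitForJobGone after issuing a delete with foreground propagation.
func DeleteJobAndWaitForGone(c clientset.Interface, ns, jobName string) error {
	policy := metav1.DeletePropagationForeground
	if err := c.BatchV1().Jobs(ns).Delete(jobName, &metav1.DeleteOptions{PropagationPolicy: &policy}); err != nil {
		return err
	}
	// Give the garbage collector up to two minutes to remove the Job object.
	return WaitForJobGone(c, ns, jobName, 2*time.Minute)
}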

vendor/k8s.io/kubernetes/test/e2e/framework/kubelet_stats.go (generated, vendored): 23 lines changed
@@ -97,10 +97,11 @@ func getKubeletMetrics(c clientset.Interface, nodeName string) (metrics.KubeletM
return kubeletMetrics, nil
}

// GetKubeletLatencyMetrics gets all latency related kubelet metrics. Note that the KubeletMetrcis
// passed in should not contain subsystem prefix.
func GetKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics {
latencyMethods := sets.NewString(
// GetDefaultKubeletLatencyMetrics calls GetKubeletLatencyMetrics with a set of default metricNames
// identifying common latency metrics.
// Note that the KubeletMetrics passed in should not contain subsystem prefix.
func GetDefaultKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics {
latencyMetricNames := sets.NewString(
kubeletmetrics.PodWorkerLatencyKey,
kubeletmetrics.PodWorkerStartLatencyKey,
kubeletmetrics.PodStartLatencyKey,
@@ -109,13 +110,15 @@ func GetKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics {
kubeletmetrics.PodWorkerStartLatencyKey,
kubeletmetrics.PLEGRelistLatencyKey,
)
return GetKubeletMetrics(ms, latencyMethods)
return GetKubeletLatencyMetrics(ms, latencyMetricNames)
}

func GetKubeletMetrics(ms metrics.KubeletMetrics, methods sets.String) KubeletLatencyMetrics {
// GetKubeletLatencyMetrics filters ms to include only those contained in the metricNames set,
// then constructs a KubeletLatencyMetrics list based on the samples associated with those metrics.
func GetKubeletLatencyMetrics(ms metrics.KubeletMetrics, filterMetricNames sets.String) KubeletLatencyMetrics {
var latencyMetrics KubeletLatencyMetrics
for method, samples := range ms {
if !methods.Has(method) {
for name, samples := range ms {
if !filterMetricNames.Has(name) {
continue
}
for _, sample := range samples {
@@ -131,7 +134,7 @@ func GetKubeletMetrics(ms metrics.KubeletMetrics, methods sets.String) KubeletLa

latencyMetrics = append(latencyMetrics, KubeletLatencyMetric{
Operation: operation,
Method: method,
Method: name,
Quantile: quantile,
Latency: time.Duration(int64(latency)) * time.Microsecond,
})
@@ -265,7 +268,7 @@ func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration
if err != nil {
return KubeletLatencyMetrics{}, err
}
latencyMetrics := GetKubeletLatencyMetrics(ms)
latencyMetrics := GetDefaultKubeletLatencyMetrics(ms)
sort.Sort(latencyMetrics)
var badMetrics KubeletLatencyMetrics
logFunc("\nLatency metrics for node %v", nodeName)
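
The refactor above separates choosing the default latency metric names (GetDefaultKubeletLatencyMetrics) from filtering by an arbitrary sets.String (GetKubeletLatencyMetrics). Below is a standalone sketch of that filter pattern; the metric names and values are invented and stand in for metrics.KubeletMetrics.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Invented samples standing in for metrics.KubeletMetrics (metric name -> observed values).
	samples := map[string][]float64{
		"pod_worker_latency_microseconds":  {1200, 3400},
		"pleg_relist_latency_microseconds": {90, 110},
		"http_requests_total":              {42},
	}

	// Only names present in the set pass, mirroring filterMetricNames.Has(name) above.
	filter := sets.NewString("pod_worker_latency_microseconds", "pleg_relist_latency_microseconds")

	for name, values := range samples {
		if !filter.Has(name) {
			continue
		}
		fmt.Println(name, values)
	}
}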

vendor/k8s.io/kubernetes/test/e2e/framework/metrics_util.go (generated, vendored): 117 lines changed
@@ -32,6 +32,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/master/ports"
schedulermetric "k8s.io/kubernetes/pkg/scheduler/metrics"
"k8s.io/kubernetes/pkg/util/system"
"k8s.io/kubernetes/test/e2e/framework/metrics"

@@ -43,7 +44,6 @@ const (
// NodeStartupThreshold is a rough estimate of the time allocated for a pod to start on a node.
NodeStartupThreshold = 4 * time.Second

podStartupThreshold time.Duration = 5 * time.Second
// We are setting 1s threshold for apicalls even in small clusters to avoid flakes.
// The problem is that if long GC is happening in small clusters (where we have e.g.
// 1-core master machines) and tests are pretty short, it may consume significant
@@ -131,6 +131,8 @@ func (m *MetricsForE2E) SummaryKind() string {
return "MetricsForE2E"
}

var SchedulingLatencyMetricName = model.LabelValue(schedulermetric.SchedulerSubsystem + "_" + schedulermetric.SchedulingLatencyName)

var InterestingApiServerMetrics = []string{
"apiserver_request_count",
"apiserver_request_latencies_summary",
@@ -188,7 +190,11 @@ type LatencyMetric struct {
}

type PodStartupLatency struct {
Latency LatencyMetric `json:"latency"`
CreateToScheduleLatency LatencyMetric `json:"createToScheduleLatency"`
ScheduleToRunLatency LatencyMetric `json:"scheduleToRunLatency"`
RunToWatchLatency LatencyMetric `json:"runToWatchLatency"`
ScheduleToWatchLatency LatencyMetric `json:"scheduleToWatchLatency"`
E2ELatency LatencyMetric `json:"e2eLatency"`
}

func (l *PodStartupLatency) SummaryKind() string {
@@ -203,21 +209,26 @@ func (l *PodStartupLatency) PrintJSON() string {
return PrettyPrintJSON(PodStartupLatencyToPerfData(l))
}

type SchedulingLatency struct {
Scheduling LatencyMetric `json:"scheduling"`
Binding LatencyMetric `json:"binding"`
Total LatencyMetric `json:"total"`
type SchedulingMetrics struct {
PredicateEvaluationLatency LatencyMetric `json:"predicateEvaluationLatency"`
PriorityEvaluationLatency LatencyMetric `json:"priorityEvaluationLatency"`
PreemptionEvaluationLatency LatencyMetric `json:"preemptionEvaluationLatency"`
BindingLatency LatencyMetric `json:"bindingLatency"`
ThroughputAverage float64 `json:"throughputAverage"`
ThroughputPerc50 float64 `json:"throughputPerc50"`
ThroughputPerc90 float64 `json:"throughputPerc90"`
ThroughputPerc99 float64 `json:"throughputPerc99"`
}

func (l *SchedulingLatency) SummaryKind() string {
return "SchedulingLatency"
func (l *SchedulingMetrics) SummaryKind() string {
return "SchedulingMetrics"
}

func (l *SchedulingLatency) PrintHumanReadable() string {
func (l *SchedulingMetrics) PrintHumanReadable() string {
return PrettyPrintJSON(l)
}

func (l *SchedulingLatency) PrintJSON() string {
func (l *SchedulingMetrics) PrintJSON() string {
return PrettyPrintJSON(l)
}

@@ -398,17 +409,17 @@ func HighLatencyRequests(c clientset.Interface, nodeCount int) (int, *APIRespons
return badMetrics, metrics, nil
}

// Verifies whether 50, 90 and 99th percentiles of PodStartupLatency are
// within the threshold.
func VerifyPodStartupLatency(latency *PodStartupLatency) error {
|
||||
if latency.Latency.Perc50 > podStartupThreshold {
|
||||
return fmt.Errorf("too high pod startup latency 50th percentile: %v", latency.Latency.Perc50)
|
||||
// Verifies whether 50, 90 and 99th percentiles of a latency metric are
|
||||
// within the expected threshold.
|
||||
func VerifyLatencyWithinThreshold(threshold, actual LatencyMetric, metricName string) error {
|
||||
if actual.Perc50 > threshold.Perc50 {
|
||||
return fmt.Errorf("too high %v latency 50th percentile: %v", metricName, actual.Perc50)
|
||||
}
|
||||
if latency.Latency.Perc90 > podStartupThreshold {
|
||||
return fmt.Errorf("too high pod startup latency 90th percentile: %v", latency.Latency.Perc90)
|
||||
if actual.Perc90 > threshold.Perc90 {
|
||||
return fmt.Errorf("too high %v latency 90th percentile: %v", metricName, actual.Perc90)
|
||||
}
|
||||
if latency.Latency.Perc99 > podStartupThreshold {
|
||||
return fmt.Errorf("too high pod startup latency 99th percentile: %v", latency.Latency.Perc99)
|
||||
if actual.Perc99 > threshold.Perc99 {
|
||||
return fmt.Errorf("too high %v latency 99th percentile: %v", metricName, actual.Perc99)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
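Note (not part of the vendored diff): VerifyLatencyWithinThreshold generalizes the old pod-startup-only check to any LatencyMetric. A hedged sketch of a caller, using a hypothetical 5s threshold at every tracked percentile:

func examplePodStartupWithinThreshold(latency *PodStartupLatency) error {
	// Hypothetical threshold; real callers would pick values per metric.
	threshold := LatencyMetric{
		Perc50: 5 * time.Second,
		Perc90: 5 * time.Second,
		Perc99: 5 * time.Second,
	}
	return VerifyLatencyWithinThreshold(threshold, latency.E2ELatency, "pod startup")
}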
@ -435,27 +446,29 @@ func getMetrics(c clientset.Interface) (string, error) {
|
||||
return string(body), nil
|
||||
}
|
||||
|
||||
// Retrieves scheduler metrics information.
|
||||
func getSchedulingLatency(c clientset.Interface) (*SchedulingLatency, error) {
|
||||
result := SchedulingLatency{}
|
||||
// Sends REST request to kube scheduler metrics
|
||||
func sendRestRequestToScheduler(c clientset.Interface, op string) (string, error) {
|
||||
opUpper := strings.ToUpper(op)
|
||||
if opUpper != "GET" && opUpper != "DELETE" {
|
||||
return "", fmt.Errorf("Unknown REST request")
|
||||
}
|
||||
|
||||
// Check if master Node is registered
|
||||
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
ExpectNoError(err)
|
||||
|
||||
var data string
|
||||
var masterRegistered = false
|
||||
for _, node := range nodes.Items {
|
||||
if system.IsMasterNode(node.Name) {
|
||||
masterRegistered = true
|
||||
}
|
||||
}
|
||||
|
||||
var responseText string
|
||||
if masterRegistered {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
|
||||
defer cancel()
|
||||
|
||||
var rawData []byte
|
||||
rawData, err = c.CoreV1().RESTClient().Get().
|
||||
body, err := c.CoreV1().RESTClient().Verb(opUpper).
|
||||
Context(ctx).
|
||||
Namespace(metav1.NamespaceSystem).
|
||||
Resource("pods").
|
||||
@ -465,51 +478,65 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingLatency, error) {
|
||||
Do().Raw()
|
||||
|
||||
ExpectNoError(err)
|
||||
data = string(rawData)
|
||||
responseText = string(body)
|
||||
} else {
|
||||
// If master is not registered fall back to old method of using SSH.
|
||||
if TestContext.Provider == "gke" {
|
||||
Logf("Not grabbing scheduler metrics through master SSH: unsupported for gke")
|
||||
return nil, nil
|
||||
return "", nil
|
||||
}
|
||||
cmd := "curl http://localhost:10251/metrics"
|
||||
|
||||
cmd := "curl -X " + opUpper + " http://localhost:10251/metrics"
|
||||
sshResult, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
|
||||
if err != nil || sshResult.Code != 0 {
|
||||
return &result, fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
|
||||
return "", fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
|
||||
}
|
||||
data = sshResult.Stdout
|
||||
responseText = sshResult.Stdout
|
||||
}
|
||||
return responseText, nil
|
||||
}
|
||||
|
||||
// Retrieves scheduler latency metrics.
|
||||
func getSchedulingLatency(c clientset.Interface) (*SchedulingMetrics, error) {
|
||||
result := SchedulingMetrics{}
|
||||
data, err := sendRestRequestToScheduler(c, "GET")
|
||||
|
||||
samples, err := extractMetricSamples(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, sample := range samples {
|
||||
if sample.Metric[model.MetricNameLabel] != SchedulingLatencyMetricName {
|
||||
continue
|
||||
}
|
||||
|
||||
var metric *LatencyMetric = nil
|
||||
switch sample.Metric[model.MetricNameLabel] {
|
||||
case "scheduler_scheduling_algorithm_latency_microseconds":
|
||||
metric = &result.Scheduling
|
||||
case "scheduler_binding_latency_microseconds":
|
||||
metric = &result.Binding
|
||||
case "scheduler_e2e_scheduling_latency_microseconds":
|
||||
metric = &result.Total
|
||||
switch sample.Metric[schedulermetric.OperationLabel] {
|
||||
case schedulermetric.PredicateEvaluation:
|
||||
metric = &result.PredicateEvaluationLatency
|
||||
case schedulermetric.PriorityEvaluation:
|
||||
metric = &result.PriorityEvaluationLatency
|
||||
case schedulermetric.PreemptionEvaluation:
|
||||
metric = &result.PreemptionEvaluationLatency
|
||||
case schedulermetric.Binding:
|
||||
metric = &result.BindingLatency
|
||||
}
|
||||
if metric == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
latency := sample.Value
|
||||
quantile, err := strconv.ParseFloat(string(sample.Metric[model.QuantileLabel]), 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
setQuantile(metric, quantile, time.Duration(int64(latency))*time.Microsecond)
|
||||
setQuantile(metric, quantile, time.Duration(int64(float64(sample.Value)*float64(time.Second))))
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// Verifies (currently just by logging them) the scheduling latencies.
|
||||
func VerifySchedulerLatency(c clientset.Interface) (*SchedulingLatency, error) {
|
||||
func VerifySchedulerLatency(c clientset.Interface) (*SchedulingMetrics, error) {
|
||||
latency, err := getSchedulingLatency(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -517,6 +544,14 @@ func VerifySchedulerLatency(c clientset.Interface) (*SchedulingLatency, error) {
|
||||
return latency, nil
|
||||
}
|
||||
|
||||
func ResetSchedulerMetrics(c clientset.Interface) error {
|
||||
responseText, err := sendRestRequestToScheduler(c, "DELETE")
|
||||
if err != nil || responseText != "metrics reset\n" {
|
||||
return fmt.Errorf("Unexpected response: %q", responseText)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func PrettyPrintJSON(metrics interface{}) string {
|
||||
output := &bytes.Buffer{}
|
||||
if err := json.NewEncoder(output).Encode(metrics); err != nil {
|
||||
|
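Note (not part of the vendored diff): with sendRestRequestToScheduler handling both GET and DELETE, a scheduler-latency measurement can now be bracketed by a metrics reset. A sketch of the intended round trip, assuming the framework's Logf helper:

func exampleSchedulerMetricsRoundTrip(c clientset.Interface) error {
	// Clear the scheduler's accumulated histograms before the measured phase.
	if err := ResetSchedulerMetrics(c); err != nil {
		return err
	}
	// ... run the scheduling-heavy workload under test here ...

	// Read the per-operation latencies back and log them.
	m, err := VerifySchedulerLatency(c)
	if err != nil {
		return err
	}
	Logf("scheduling metrics: %v", m.PrintJSON())
	return nil
}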
5
vendor/k8s.io/kubernetes/test/e2e/framework/networking_utils.go
generated
vendored
@ -40,7 +40,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
@ -403,7 +402,7 @@ func (config *NetworkingTestConfig) createNetShellPodSpec(podName, hostname stri
|
||||
pod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
@ -447,7 +446,7 @@ func (config *NetworkingTestConfig) createTestPodSpec() *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: testPodName,
|
||||
|
125
vendor/k8s.io/kubernetes/test/e2e/framework/nodes_util.go
generated
vendored
@ -24,11 +24,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
func EtcdUpgrade(target_storage, target_version string) error {
|
||||
@ -67,7 +63,7 @@ func etcdUpgradeGCE(target_storage, target_version string) error {
|
||||
os.Environ(),
|
||||
"TEST_ETCD_VERSION="+target_version,
|
||||
"STORAGE_BACKEND="+target_storage,
|
||||
"TEST_ETCD_IMAGE=3.2.14")
|
||||
"TEST_ETCD_IMAGE=3.2.18-0")
|
||||
|
||||
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-l", "-M")
|
||||
return err
|
||||
@ -82,11 +78,11 @@ func ingressUpgradeGCE(isUpgrade bool) error {
|
||||
command = fmt.Sprintf("sudo sed -i -re 's|(image:)(.*)|\\1 %s|' /etc/kubernetes/manifests/glbc.manifest", targetImage)
|
||||
} else {
|
||||
// Upgrade to latest HEAD image.
|
||||
command = "sudo sed -i -re 's/(image:)(.*)/\\1 gcr.io\\/k8s-ingress-image-push\\/ingress-gce-e2e-glbc-amd64:latest/' /etc/kubernetes/manifests/glbc.manifest"
|
||||
command = "sudo sed -i -re 's/(image:)(.*)/\\1 gcr.io\\/k8s-ingress-image-push\\/ingress-gce-e2e-glbc-amd64:master/' /etc/kubernetes/manifests/glbc.manifest"
|
||||
}
|
||||
} else {
|
||||
// Downgrade to latest release image.
|
||||
command = "sudo sed -i -re 's/(image:)(.*)/\\1 k8s.gcr.io\\/google_containers\\/glbc:0.9.7/' /etc/kubernetes/manifests/glbc.manifest"
|
||||
command = "sudo sed -i -re 's/(image:)(.*)/\\1 k8s.gcr.io\\/ingress-gce-glbc-amd64:v1.1.1/' /etc/kubernetes/manifests/glbc.manifest"
|
||||
}
|
||||
// Kubelet should restart glbc automatically.
|
||||
sshResult, err := NodeExec(GetMasterHost(), command)
|
||||
@ -107,7 +103,7 @@ func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error {
|
||||
env = append(env,
|
||||
"TEST_ETCD_VERSION="+TestContext.EtcdUpgradeVersion,
|
||||
"STORAGE_BACKEND="+TestContext.EtcdUpgradeStorage,
|
||||
"TEST_ETCD_IMAGE=3.2.14")
|
||||
"TEST_ETCD_IMAGE=3.2.18-0")
|
||||
} else {
|
||||
// In e2e tests, we skip the confirmation prompt about
|
||||
// implicit etcd upgrades to simulate the user entering "y".
|
||||
@ -119,17 +115,36 @@ func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func locationParamGKE() string {
|
||||
if TestContext.CloudConfig.MultiMaster {
|
||||
// GKE Regional Clusters are being tested.
|
||||
return fmt.Sprintf("--region=%s", TestContext.CloudConfig.Region)
|
||||
}
|
||||
return fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone)
|
||||
}
|
||||
|
||||
func appendContainerCommandGroupIfNeeded(args []string) []string {
|
||||
if TestContext.CloudConfig.Region != "" {
|
||||
// TODO(wojtek-t): Get rid of it once Regional Clusters go to GA.
|
||||
return append([]string{"beta"}, args...)
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
func masterUpgradeGKE(v string) error {
|
||||
Logf("Upgrading master to %q", v)
|
||||
_, _, err := RunCmd("gcloud", "container",
|
||||
args := []string{
|
||||
"container",
|
||||
"clusters",
|
||||
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
|
||||
fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone),
|
||||
locationParamGKE(),
|
||||
"upgrade",
|
||||
TestContext.CloudConfig.Cluster,
|
||||
"--master",
|
||||
fmt.Sprintf("--cluster-version=%s", v),
|
||||
"--quiet")
|
||||
"--quiet",
|
||||
}
|
||||
_, _, err := RunCmd("gcloud", appendContainerCommandGroupIfNeeded(args)...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -190,16 +205,7 @@ func NodeUpgrade(f *Framework, v string, img string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for it to complete and validate nodes are healthy.
|
||||
//
|
||||
// TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in
|
||||
// GKE; the operation shouldn't return until they all are.
|
||||
Logf("Waiting up to %v for all nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout)
|
||||
if _, err := CheckNodesReady(f.ClientSet, RestartNodeReadyAgainTimeout, TestContext.CloudConfig.NumNodes); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return waitForNodesReadyAfterUpgrade(f)
|
||||
}
|
||||
|
||||
// TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default.
|
||||
@ -208,9 +214,20 @@ func NodeUpgradeGCEWithKubeProxyDaemonSet(f *Framework, v string, img string, en
|
||||
if err := nodeUpgradeGCE(v, img, enableKubeProxyDaemonSet); err != nil {
|
||||
return err
|
||||
}
|
||||
return waitForNodesReadyAfterUpgrade(f)
|
||||
}
|
||||
|
||||
func waitForNodesReadyAfterUpgrade(f *Framework) error {
|
||||
// Wait for it to complete and validate nodes are healthy.
|
||||
Logf("Waiting up to %v for all nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout)
|
||||
if _, err := CheckNodesReady(f.ClientSet, RestartNodeReadyAgainTimeout, TestContext.CloudConfig.NumNodes); err != nil {
|
||||
//
|
||||
// TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in
|
||||
// GKE; the operation shouldn't return until they all are.
|
||||
numNodes, err := NumberOfRegisteredNodes(f.ClientSet)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't detect number of nodes")
|
||||
}
|
||||
Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout, numNodes)
|
||||
if _, err := CheckNodesReady(f.ClientSet, numNodes, RestartNodeReadyAgainTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@ -235,7 +252,7 @@ func nodeUpgradeGKE(v string, img string) error {
|
||||
"container",
|
||||
"clusters",
|
||||
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
|
||||
fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone),
|
||||
locationParamGKE(),
|
||||
"upgrade",
|
||||
TestContext.CloudConfig.Cluster,
|
||||
fmt.Sprintf("--cluster-version=%s", v),
|
||||
@ -244,7 +261,7 @@ func nodeUpgradeGKE(v string, img string) error {
|
||||
if len(img) > 0 {
|
||||
args = append(args, fmt.Sprintf("--image-type=%s", img))
|
||||
}
|
||||
_, _, err := RunCmd("gcloud", args...)
|
||||
_, _, err := RunCmd("gcloud", appendContainerCommandGroupIfNeeded(args)...)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
@ -255,64 +272,6 @@ func nodeUpgradeGKE(v string, img string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckNodesReady waits up to nt for expect nodes accessed by c to be ready,
|
||||
// returning an error if this doesn't happen in time. It returns the names of
|
||||
// nodes it finds.
|
||||
func CheckNodesReady(c clientset.Interface, nt time.Duration, expect int) ([]string, error) {
|
||||
// First, keep getting all of the nodes until we get the number we expect.
|
||||
var nodeList *v1.NodeList
|
||||
var errLast error
|
||||
start := time.Now()
|
||||
found := wait.Poll(Poll, nt, func() (bool, error) {
|
||||
// A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver
|
||||
// knows about all of the nodes. Thus, we retry the list nodes call
|
||||
// until we get the expected number of nodes.
|
||||
nodeList, errLast = c.CoreV1().Nodes().List(metav1.ListOptions{
|
||||
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String()})
|
||||
if errLast != nil {
|
||||
return false, nil
|
||||
}
|
||||
if len(nodeList.Items) != expect {
|
||||
errLast = fmt.Errorf("expected to find %d nodes but found only %d (%v elapsed)",
|
||||
expect, len(nodeList.Items), time.Since(start))
|
||||
Logf("%v", errLast)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}) == nil
|
||||
nodeNames := make([]string, len(nodeList.Items))
|
||||
for i, n := range nodeList.Items {
|
||||
nodeNames[i] = n.ObjectMeta.Name
|
||||
}
|
||||
if !found {
|
||||
return nodeNames, fmt.Errorf("couldn't find %d nodes within %v; last error: %v",
|
||||
expect, nt, errLast)
|
||||
}
|
||||
Logf("Successfully found %d nodes", expect)
|
||||
|
||||
// Next, ensure in parallel that all the nodes are ready. We subtract the
|
||||
// time we spent waiting above.
|
||||
timeout := nt - time.Since(start)
|
||||
result := make(chan bool, len(nodeList.Items))
|
||||
for _, n := range nodeNames {
|
||||
n := n
|
||||
go func() { result <- WaitForNodeToBeReady(c, n, timeout) }()
|
||||
}
|
||||
failed := false
|
||||
// TODO(mbforbes): Change to `for range` syntax once we support only Go
|
||||
// >= 1.4.
|
||||
for i := range nodeList.Items {
|
||||
_ = i
|
||||
if !<-result {
|
||||
failed = true
|
||||
}
|
||||
}
|
||||
if failed {
|
||||
return nodeNames, fmt.Errorf("at least one node failed to be ready")
|
||||
}
|
||||
return nodeNames, nil
|
||||
}
|
||||
|
||||
// MigTemplate (GCE-only) returns the name of the MIG template that the
|
||||
// nodes of the cluster use.
|
||||
func MigTemplate() (string, error) {
|
||||
|
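Note (not part of the vendored diff): locationParamGKE and appendContainerCommandGroupIfNeeded centralize the regional-cluster handling for gcloud invocations. A hedged sketch of a hypothetical listing command built the same way:

func exampleGKEClustersList() error {
	args := []string{
		"container",
		"clusters",
		"list",
		fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
		// --region=... for multi-master (regional) clusters, --zone=... otherwise.
		locationParamGKE(),
	}
	// Regional clusters still need the "beta" command group prepended.
	_, _, err := RunCmd("gcloud", appendContainerCommandGroupIfNeeded(args)...)
	return err
}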
27
vendor/k8s.io/kubernetes/test/e2e/framework/perf_util.go
generated
vendored
@ -53,22 +53,29 @@ func ApiCallToPerfData(apicalls *APIResponsiveness) *perftype.PerfData {
|
||||
return perfData
|
||||
}
|
||||
|
||||
// PodStartupLatencyToPerfData transforms PodStartupLatency to PerfData.
|
||||
func PodStartupLatencyToPerfData(latency *PodStartupLatency) *perftype.PerfData {
|
||||
perfData := &perftype.PerfData{Version: currentApiCallMetricsVersion}
|
||||
item := perftype.DataItem{
|
||||
func latencyToPerfData(l LatencyMetric, name string) perftype.DataItem {
|
||||
return perftype.DataItem{
|
||||
Data: map[string]float64{
|
||||
"Perc50": float64(latency.Latency.Perc50) / 1000000, // us -> ms
|
||||
"Perc90": float64(latency.Latency.Perc90) / 1000000,
|
||||
"Perc99": float64(latency.Latency.Perc99) / 1000000,
|
||||
"Perc100": float64(latency.Latency.Perc100) / 1000000,
|
||||
"Perc50": float64(l.Perc50) / 1000000, // us -> ms
|
||||
"Perc90": float64(l.Perc90) / 1000000,
|
||||
"Perc99": float64(l.Perc99) / 1000000,
|
||||
"Perc100": float64(l.Perc100) / 1000000,
|
||||
},
|
||||
Unit: "ms",
|
||||
Labels: map[string]string{
|
||||
"Metric": "pod_startup",
|
||||
"Metric": name,
|
||||
},
|
||||
}
|
||||
perfData.DataItems = append(perfData.DataItems, item)
|
||||
}
|
||||
|
||||
// PodStartupLatencyToPerfData transforms PodStartupLatency to PerfData.
|
||||
func PodStartupLatencyToPerfData(latency *PodStartupLatency) *perftype.PerfData {
|
||||
perfData := &perftype.PerfData{Version: currentApiCallMetricsVersion}
|
||||
perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.CreateToScheduleLatency, "create_to_schedule"))
|
||||
perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.ScheduleToRunLatency, "schedule_to_run"))
|
||||
perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.RunToWatchLatency, "run_to_watch"))
|
||||
perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.ScheduleToWatchLatency, "schedule_to_watch"))
|
||||
perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.E2ELatency, "pod_startup"))
|
||||
return perfData
|
||||
}
|
||||
|
||||
|
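Note (not part of the vendored diff): latencyToPerfData factors out the per-metric DataItem construction so each pod-startup phase gets its own labeled entry. A sketch of dumping the result, assuming the framework's PrettyPrintJSON and Logf helpers:

func exampleLogPodStartupPerfData(latency *PodStartupLatency) {
	// One DataItem per phase (create_to_schedule, schedule_to_run, ...), values in ms.
	perfData := PodStartupLatencyToPerfData(latency)
	Logf("%s", PrettyPrintJSON(perfData))
}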
49
vendor/k8s.io/kubernetes/test/e2e/framework/profile_gatherer.go
generated
vendored
@ -19,7 +19,6 @@ package framework
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
@ -69,36 +68,44 @@ func gatherProfileOfKind(profileBaseName, kind string) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to execute curl command on master through SSH: %v", err)
|
||||
}
|
||||
// Write the data to a temp file.
|
||||
var tmpfile *os.File
|
||||
tmpfile, err = ioutil.TempFile("", "apiserver-profile")
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to create temp file for profile data: %v", err)
|
||||
}
|
||||
defer os.Remove(tmpfile.Name())
|
||||
if _, err := tmpfile.Write([]byte(sshResult.Stdout)); err != nil {
|
||||
return fmt.Errorf("Failed to write temp file with profile data: %v", err)
|
||||
}
|
||||
if err := tmpfile.Close(); err != nil {
|
||||
return fmt.Errorf("Failed to close temp file: %v", err)
|
||||
}
|
||||
// Create a graph from the data and write it to a pdf file.
|
||||
var cmd *exec.Cmd
|
||||
|
||||
var profilePrefix string
|
||||
switch {
|
||||
// TODO: Support other profile kinds if needed (e.g inuse_space, alloc_objects, mutex, etc)
|
||||
case kind == "heap":
|
||||
cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", "--alloc_space", tmpfile.Name())
|
||||
profilePrefix = "ApiserverMemoryProfile_"
|
||||
case strings.HasPrefix(kind, "profile"):
|
||||
cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", tmpfile.Name())
|
||||
profilePrefix = "ApiserverCPUProfile_"
|
||||
default:
|
||||
return fmt.Errorf("Unknown profile kind provided: %s", kind)
|
||||
}
|
||||
|
||||
// Write the data to a file.
|
||||
rawprofilePath := path.Join(getProfilesDirectoryPath(), profilePrefix+profileBaseName+".pprof")
|
||||
rawprofile, err := os.Create(rawprofilePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to create file for the profile graph: %v", err)
|
||||
}
|
||||
defer rawprofile.Close()
|
||||
|
||||
if _, err := rawprofile.Write([]byte(sshResult.Stdout)); err != nil {
|
||||
return fmt.Errorf("Failed to write file with profile data: %v", err)
|
||||
}
|
||||
if err := rawprofile.Close(); err != nil {
|
||||
return fmt.Errorf("Failed to close file: %v", err)
|
||||
}
|
||||
// Create a graph from the data and write it to a pdf file.
|
||||
var cmd *exec.Cmd
|
||||
switch {
|
||||
// TODO: Support other profile kinds if needed (e.g inuse_space, alloc_objects, mutex, etc)
|
||||
case kind == "heap":
|
||||
cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", "--alloc_space", rawprofile.Name())
|
||||
case strings.HasPrefix(kind, "profile"):
|
||||
cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", rawprofile.Name())
|
||||
default:
|
||||
return fmt.Errorf("Unknown profile kind provided: %s", kind)
|
||||
}
|
||||
outfilePath := path.Join(getProfilesDirectoryPath(), profilePrefix+profileBaseName+".pdf")
|
||||
var outfile *os.File
|
||||
outfile, err = os.Create(outfilePath)
|
||||
outfile, err := os.Create(outfilePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to create file for the profile graph: %v", err)
|
||||
}
|
||||
|
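Note (not part of the vendored diff): the profile gatherer now stores the raw .pprof data before rendering, so the PDF step can be reproduced later. A minimal sketch of that rendering step, assuming go tool pprof and graphviz are available where it runs; the output wiring is an assumption, not a copy of the framework code:

func exampleRenderHeapProfile(rawprofilePath, pdfPath string) error {
	// Same style of invocation the framework uses for heap profiles.
	cmd := exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", "--alloc_space", rawprofilePath)
	outfile, err := os.Create(pdfPath)
	if err != nil {
		return err
	}
	defer outfile.Close()
	// Write the rendered PDF to the output file.
	cmd.Stdout = outfile
	return cmd.Run()
}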
1
vendor/k8s.io/kubernetes/test/e2e/framework/psp_util.go
generated
vendored
@ -71,6 +71,7 @@ func PrivilegedPSP(name string) *extensionsv1beta1.PodSecurityPolicy {
|
||||
Rule: extensionsv1beta1.FSGroupStrategyRunAsAny,
|
||||
},
|
||||
ReadOnlyRootFilesystem: false,
|
||||
AllowedUnsafeSysctls: []string{"*"},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
127
vendor/k8s.io/kubernetes/test/e2e/framework/pv_util.go
generated
vendored
@ -25,6 +25,7 @@ import (
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/golang/glog"
|
||||
. "github.com/onsi/ginkgo"
|
||||
"google.golang.org/api/googleapi"
|
||||
"k8s.io/api/core/v1"
|
||||
@ -35,7 +36,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
|
||||
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
@ -678,6 +678,22 @@ func DeletePDWithRetry(diskName string) error {
|
||||
return fmt.Errorf("unable to delete PD %q: %v", diskName, err)
|
||||
}
|
||||
|
||||
func newAWSClient(zone string) *ec2.EC2 {
|
||||
var cfg *aws.Config
|
||||
|
||||
if zone == "" {
|
||||
zone = TestContext.CloudConfig.Zone
|
||||
}
|
||||
if zone == "" {
|
||||
glog.Warning("No AWS zone configured!")
|
||||
cfg = nil
|
||||
} else {
|
||||
region := zone[:len(zone)-1]
|
||||
cfg = &aws.Config{Region: aws.String(region)}
|
||||
}
|
||||
return ec2.New(session.New(), cfg)
|
||||
}
|
||||
|
||||
func createPD(zone string) (string, error) {
|
||||
if zone == "" {
|
||||
zone = TestContext.CloudConfig.Zone
|
||||
@ -691,6 +707,14 @@ func createPD(zone string) (string, error) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if zone == "" && TestContext.CloudConfig.MultiZone {
|
||||
zones, err := gceCloud.GetAllZonesFromCloudProvider()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
zone, _ = zones.PopAny()
|
||||
}
|
||||
|
||||
tags := map[string]string{}
|
||||
err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeSSD, zone, 10 /* sizeGb */, tags)
|
||||
if err != nil {
|
||||
@ -698,8 +722,7 @@ func createPD(zone string) (string, error) {
|
||||
}
|
||||
return pdName, nil
|
||||
} else if TestContext.Provider == "aws" {
|
||||
client := ec2.New(session.New())
|
||||
|
||||
client := newAWSClient(zone)
|
||||
request := &ec2.CreateVolumeInput{}
|
||||
request.AvailabilityZone = aws.String(zone)
|
||||
request.Size = aws.Int64(10)
|
||||
@ -751,7 +774,7 @@ func deletePD(pdName string) error {
|
||||
}
|
||||
return err
|
||||
} else if TestContext.Provider == "aws" {
|
||||
client := ec2.New(session.New())
|
||||
client := newAWSClient("")
|
||||
|
||||
tokens := strings.Split(pdName, "/")
|
||||
awsVolumeID := tokens[len(tokens)-1]
|
||||
@ -792,12 +815,12 @@ func MakeWritePod(ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
|
||||
// name. A slice of BASH commands can be supplied as args to be run by the pod
|
||||
func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod {
|
||||
if len(command) == 0 {
|
||||
command = "while true; do sleep 1; done"
|
||||
command = "trap exit TERM; while true; do sleep 1; done"
|
||||
}
|
||||
podSpec := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "pvc-tester-",
|
||||
@ -833,19 +856,64 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten
|
||||
return podSpec
|
||||
}
|
||||
|
||||
// Returns a pod definition based on the namespace. The pod references the PVC's
|
||||
// name. A slice of BASH commands can be supplied as args to be run by the pod.
|
||||
// SELinux testing requires to pass HostIPC and HostPID as booleansi arguments.
|
||||
func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions) *v1.Pod {
|
||||
if len(command) == 0 {
|
||||
command = "while true; do sleep 1; done"
|
||||
}
|
||||
podName := "security-context-" + string(uuid.NewUUID())
|
||||
fsGroup := int64(1000)
|
||||
// Returns a pod definition based on the namespace using nginx image
|
||||
func MakeNginxPod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) *v1.Pod {
|
||||
podSpec := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "pvc-tester-",
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "write-pod",
|
||||
Image: "nginx",
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
Name: "http-server",
|
||||
ContainerPort: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
|
||||
var volumes = make([]v1.Volume, len(pvclaims))
|
||||
for index, pvclaim := range pvclaims {
|
||||
volumename := fmt.Sprintf("volume%v", index+1)
|
||||
volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
|
||||
volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
|
||||
}
|
||||
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
|
||||
podSpec.Spec.Volumes = volumes
|
||||
if nodeSelector != nil {
|
||||
podSpec.Spec.NodeSelector = nodeSelector
|
||||
}
|
||||
return podSpec
|
||||
}
|
||||
|
||||
// Returns a pod definition based on the namespace. The pod references the PVC's
|
||||
// name. A slice of BASH commands can be supplied as args to be run by the pod.
|
||||
// SELinux testing requires to pass HostIPC and HostPID as booleansi arguments.
|
||||
func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64) *v1.Pod {
|
||||
if len(command) == 0 {
|
||||
command = "trap exit TERM; while true; do sleep 1; done"
|
||||
}
|
||||
podName := "security-context-" + string(uuid.NewUUID())
|
||||
if fsGroup == nil {
|
||||
fsGroup = func(i int64) *int64 {
|
||||
return &i
|
||||
}(1000)
|
||||
}
|
||||
podSpec := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
@ -855,7 +923,7 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bo
|
||||
HostIPC: hostIPC,
|
||||
HostPID: hostPID,
|
||||
SecurityContext: &v1.PodSecurityContext{
|
||||
FSGroup: &fsGroup,
|
||||
FSGroup: fsGroup,
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
@ -911,9 +979,8 @@ func CreatePod(client clientset.Interface, namespace string, nodeSelector map[st
|
||||
return pod, nil
|
||||
}
|
||||
|
||||
// create security pod with given claims
|
||||
func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions) (*v1.Pod, error) {
|
||||
pod := MakeSecPod(namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel)
|
||||
func CreateNginxPod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) (*v1.Pod, error) {
|
||||
pod := MakeNginxPod(namespace, nodeSelector, pvclaims)
|
||||
pod, err := client.CoreV1().Pods(namespace).Create(pod)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("pod Create API error: %v", err)
|
||||
@ -931,6 +998,26 @@ func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.P
|
||||
return pod, nil
|
||||
}
|
||||
|
||||
// create security pod with given claims
|
||||
func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, timeout time.Duration) (*v1.Pod, error) {
|
||||
pod := MakeSecPod(namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
|
||||
pod, err := client.CoreV1().Pods(namespace).Create(pod)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("pod Create API error: %v", err)
|
||||
}
|
||||
// Waiting for pod to be running
|
||||
err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout)
|
||||
if err != nil {
|
||||
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
|
||||
}
|
||||
// get fresh pod info
|
||||
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return pod, fmt.Errorf("pod Get API error: %v", err)
|
||||
}
|
||||
return pod, nil
|
||||
}
|
||||
|
||||
// Define and create a pod with a mounted PV. Pod runs infinite loop until killed.
|
||||
func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
|
||||
return CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
|
||||
|
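Note (not part of the vendored diff): CreateSecPod now takes an optional fsGroup and an explicit running-pod timeout. A hedged sketch of a caller; the fsGroup value and the use of PodStartTimeout are illustrative assumptions:

func exampleCreateSecPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
	fsGroup := int64(2000) // hypothetical; pass nil to fall back to the default of 1000
	return CreateSecPod(c, ns, []*v1.PersistentVolumeClaim{pvc},
		false /* isPrivileged */, "" /* default command */,
		false /* hostIPC */, false /* hostPID */,
		nil /* seLinuxLabel */, &fsGroup, PodStartTimeout)
}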
21
vendor/k8s.io/kubernetes/test/e2e/framework/rc_util.go
generated
vendored
@ -25,13 +25,12 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
scaleclient "k8s.io/client-go/scale"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
@ -60,7 +59,7 @@ func RcByNameContainer(name string, replicas int32, image string, labels map[str
|
||||
return &v1.ReplicationController{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ReplicationController",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
@ -85,9 +84,7 @@ func RcByNameContainer(name string, replicas int32, image string, labels map[str
|
||||
|
||||
// ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till
|
||||
// none are running, otherwise it does what a synchronous scale operation would do.
|
||||
//TODO(p0lyn0mial): remove internalClientset.
|
||||
//TODO(p0lyn0mial): update the callers.
|
||||
func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns string, l map[string]string, replicas uint) error {
|
||||
func ScaleRCByLabels(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns string, l map[string]string, replicas uint) error {
|
||||
listOpts := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()}
|
||||
rcs, err := clientset.CoreV1().ReplicationControllers(ns).List(listOpts)
|
||||
if err != nil {
|
||||
@ -99,7 +96,7 @@ func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalcl
|
||||
Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
|
||||
for _, labelRC := range rcs.Items {
|
||||
name := labelRC.Name
|
||||
if err := ScaleRC(clientset, internalClientset, scalesGetter, ns, name, replicas, false); err != nil {
|
||||
if err := ScaleRC(clientset, scalesGetter, ns, name, replicas, false); err != nil {
|
||||
return err
|
||||
}
|
||||
rc, err := clientset.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
|
||||
@ -107,7 +104,7 @@ func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalcl
|
||||
return err
|
||||
}
|
||||
if replicas == 0 {
|
||||
ps, err := podStoreForSelector(clientset, rc.Namespace, labels.SelectorFromSet(rc.Spec.Selector))
|
||||
ps, err := testutils.NewPodStore(clientset, rc.Namespace, labels.SelectorFromSet(rc.Spec.Selector), fields.Everything())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -155,12 +152,8 @@ func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error {
|
||||
return DeleteResourceAndWaitForGC(c, api.Kind("ReplicationController"), ns, name)
|
||||
}
|
||||
|
||||
func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
|
||||
return DeleteResourceAndPods(clientset, internalClientset, api.Kind("ReplicationController"), ns, name)
|
||||
}
|
||||
|
||||
func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
|
||||
return ScaleResource(clientset, internalClientset, scalesGetter, ns, name, size, wait, api.Kind("ReplicationController"), api.Resource("replicationcontrollers"))
|
||||
func ScaleRC(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
|
||||
return ScaleResource(clientset, scalesGetter, ns, name, size, wait, api.Kind("ReplicationController"), api.Resource("replicationcontrollers"))
|
||||
}
|
||||
|
||||
func RunRC(config testutils.RCConfig) error {
|
||||
|
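Note (not part of the vendored diff): ScaleRC drops the internal clientset and scales through the polymorphic scale client instead. A minimal sketch of the updated call site:

func exampleScaleRC(c clientset.Interface, scales scaleclient.ScalesGetter, ns, name string) error {
	// Scale the RC to 3 replicas and wait for the pods to settle.
	return ScaleRC(c, scales, ns, name, 3, true /* wait */)
}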
30
vendor/k8s.io/kubernetes/test/e2e/framework/rs_util.go
generated
vendored
@ -21,29 +21,29 @@ import (
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
extensionsclient "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
|
||||
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
|
||||
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
type updateRsFunc func(d *extensions.ReplicaSet)
|
||||
type updateRsFunc func(d *apps.ReplicaSet)
|
||||
|
||||
func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*extensions.ReplicaSet, error) {
|
||||
func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) {
|
||||
return testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)
|
||||
}
|
||||
|
||||
// CheckNewRSAnnotations check if the new RS's annotation is as expected
|
||||
func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -59,7 +59,7 @@ func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, exp
|
||||
// WaitForReadyReplicaSet waits until the replicaset has all of its replicas ready.
|
||||
func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
|
||||
err := wait.Poll(Poll, pollShortTimeout, func() (bool, error) {
|
||||
rs, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
|
||||
rs, err := c.AppsV1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -72,7 +72,7 @@ func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
|
||||
}
|
||||
|
||||
// WaitForReplicaSetDesiredReplicas waits until the replicaset has desired number of replicas.
|
||||
func WaitForReplicaSetDesiredReplicas(rsClient extensionsclient.ReplicaSetsGetter, replicaSet *extensions.ReplicaSet) error {
|
||||
func WaitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, replicaSet *apps.ReplicaSet) error {
|
||||
desiredGeneration := replicaSet.Generation
|
||||
err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
|
||||
rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
|
||||
@ -88,10 +88,10 @@ func WaitForReplicaSetDesiredReplicas(rsClient extensionsclient.ReplicaSetsGette
|
||||
}
|
||||
|
||||
// WaitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a RS to equal targetReplicaNum
|
||||
func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *extensions.ReplicaSet, targetReplicaNum int32) error {
|
||||
func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *apps.ReplicaSet, targetReplicaNum int32) error {
|
||||
desiredGeneration := replicaSet.Generation
|
||||
err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
|
||||
rs, err := c.ExtensionsV1beta1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
|
||||
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -104,10 +104,10 @@ func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *exte
|
||||
}
|
||||
|
||||
// WaitForReplicaSetTargetAvailableReplicas waits for .status.availableReplicas of a RS to equal targetReplicaNum
|
||||
func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet *extensions.ReplicaSet, targetReplicaNum int32) error {
|
||||
func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet *apps.ReplicaSet, targetReplicaNum int32) error {
|
||||
desiredGeneration := replicaSet.Generation
|
||||
err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
|
||||
rs, err := c.ExtensionsV1beta1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
|
||||
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -126,8 +126,8 @@ func RunReplicaSet(config testutils.ReplicaSetConfig) error {
|
||||
return testutils.RunReplicaSet(config)
|
||||
}
|
||||
|
||||
func NewReplicaSet(name, namespace string, replicas int32, podLabels map[string]string, imageName, image string) *extensions.ReplicaSet {
|
||||
return &extensions.ReplicaSet{
|
||||
func NewReplicaSet(name, namespace string, replicas int32, podLabels map[string]string, imageName, image string) *apps.ReplicaSet {
|
||||
return &apps.ReplicaSet{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ReplicaSet",
|
||||
APIVersion: "extensions/v1beta1",
|
||||
@ -136,7 +136,7 @@ func NewReplicaSet(name, namespace string, replicas int32, podLabels map[string]
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
},
|
||||
Spec: extensions.ReplicaSetSpec{
|
||||
Spec: apps.ReplicaSetSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: podLabels,
|
||||
},
|
||||
|
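Note (not part of the vendored diff): the ReplicaSet helpers now operate on apps/v1 objects rather than extensions/v1beta1. A sketch of a caller fetching and waiting on an apps/v1 ReplicaSet:

func exampleWaitForReplicaSet(c clientset.Interface, ns, name string) error {
	rs, err := c.AppsV1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Spec.Replicas is defaulted by the API server, so the pointer is non-nil here.
	if err := WaitForReplicaSetTargetAvailableReplicas(c, rs, *rs.Spec.Replicas); err != nil {
		return err
	}
	return WaitForReadyReplicaSet(c, ns, name)
}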
147
vendor/k8s.io/kubernetes/test/e2e/framework/service_util.go
generated
vendored
@ -94,6 +94,10 @@ const (
|
||||
// GCPMaxInstancesInInstanceGroup is the maximum number of instances supported in
|
||||
// one instance group on GCP.
|
||||
GCPMaxInstancesInInstanceGroup = 2000
|
||||
|
||||
// AffinityConfirmCount is the number of needed continuous requests to confirm that
|
||||
// affinity is enabled.
|
||||
AffinityConfirmCount = 15
|
||||
)
|
||||
|
||||
// This should match whatever the default/configured range is
|
||||
@ -211,6 +215,20 @@ func (j *ServiceTestJig) CreateExternalNameServiceOrFail(namespace string, tweak
|
||||
return result
|
||||
}
|
||||
|
||||
// CreateServiceWithServicePort creates a new Service with ServicePort.
|
||||
func (j *ServiceTestJig) CreateServiceWithServicePort(labels map[string]string, namespace string, ports []v1.ServicePort) (*v1.Service, error) {
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: j.Name,
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: labels,
|
||||
Ports: ports,
|
||||
},
|
||||
}
|
||||
return j.Client.CoreV1().Services(namespace).Create(service)
|
||||
}
|
||||
|
||||
func (j *ServiceTestJig) ChangeServiceType(namespace, name string, newType v1.ServiceType, timeout time.Duration) {
|
||||
ingressIP := ""
|
||||
svc := j.UpdateServiceOrFail(namespace, name, func(s *v1.Service) {
|
||||
@ -1192,26 +1210,13 @@ func ValidateEndpointsOrFail(c clientset.Interface, namespace, serviceName strin
|
||||
Failf("Timed out waiting for service %s in namespace %s to expose endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, ServiceStartTimeout)
|
||||
}
|
||||
|
||||
// StartServeHostnameService creates a replication controller that serves its hostname and a service on top of it.
|
||||
func StartServeHostnameService(c clientset.Interface, internalClient internalclientset.Interface, ns, name string, port, replicas int) ([]string, string, error) {
|
||||
// StartServeHostnameService creates a replication controller that serves its
|
||||
// hostname and a service on top of it.
|
||||
func StartServeHostnameService(c clientset.Interface, internalClient internalclientset.Interface, svc *v1.Service, ns string, replicas int) ([]string, string, error) {
|
||||
podNames := make([]string, replicas)
|
||||
|
||||
name := svc.ObjectMeta.Name
|
||||
By("creating service " + name + " in namespace " + ns)
|
||||
_, err := c.CoreV1().Services(ns).Create(&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Ports: []v1.ServicePort{{
|
||||
Port: int32(port),
|
||||
TargetPort: intstr.FromInt(9376),
|
||||
Protocol: "TCP",
|
||||
}},
|
||||
Selector: map[string]string{
|
||||
"name": name,
|
||||
},
|
||||
},
|
||||
})
|
||||
_, err := c.CoreV1().Services(ns).Create(svc)
|
||||
if err != nil {
|
||||
return podNames, "", err
|
||||
}
|
||||
@ -1255,8 +1260,8 @@ func StartServeHostnameService(c clientset.Interface, internalClient internalcli
|
||||
return podNames, serviceIP, nil
|
||||
}
|
||||
|
||||
func StopServeHostnameService(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
|
||||
if err := DeleteRCAndPods(clientset, internalClientset, ns, name); err != nil {
|
||||
func StopServeHostnameService(clientset clientset.Interface, ns, name string) error {
|
||||
if err := DeleteRCAndWaitForGC(clientset, ns, name); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := clientset.CoreV1().Services(ns).Delete(name, nil); err != nil {
|
||||
@ -1368,17 +1373,17 @@ func VerifyServeHostnameServiceDown(c clientset.Interface, host string, serviceI
|
||||
return fmt.Errorf("waiting for service to be down timed out")
|
||||
}
|
||||
|
||||
func CleanupServiceResources(c clientset.Interface, loadBalancerName, zone string) {
|
||||
func CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) {
|
||||
if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
|
||||
CleanupServiceGCEResources(c, loadBalancerName, zone)
|
||||
CleanupServiceGCEResources(c, loadBalancerName, region, zone)
|
||||
}
|
||||
|
||||
// TODO: we need to add this function with other cloud providers, if there is a need.
|
||||
}
|
||||
|
||||
func CleanupServiceGCEResources(c clientset.Interface, loadBalancerName, zone string) {
|
||||
func CleanupServiceGCEResources(c clientset.Interface, loadBalancerName, region, zone string) {
|
||||
if pollErr := wait.Poll(5*time.Second, LoadBalancerCleanupTimeout, func() (bool, error) {
|
||||
if err := CleanupGCEResources(c, loadBalancerName, zone); err != nil {
|
||||
if err := CleanupGCEResources(c, loadBalancerName, region, zone); err != nil {
|
||||
Logf("Still waiting for glbc to cleanup: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
@ -1450,3 +1455,97 @@ func GetServiceLoadBalancerCreationTimeout(cs clientset.Interface) time.Duration
|
||||
}
|
||||
return LoadBalancerCreateTimeoutDefault
|
||||
}
|
||||
|
||||
// affinityTracker tracks the destination of a request for the affinity tests.
|
||||
type affinityTracker struct {
|
||||
hostTrace []string
|
||||
}
|
||||
|
||||
// Record the response going to a given host.
|
||||
func (at *affinityTracker) recordHost(host string) {
|
||||
at.hostTrace = append(at.hostTrace, host)
|
||||
}
|
||||
|
||||
// Check that we got a constant count requests going to the same host.
|
||||
func (at *affinityTracker) checkHostTrace(count int) (fulfilled, affinityHolds bool) {
|
||||
fulfilled = (len(at.hostTrace) >= count)
|
||||
if len(at.hostTrace) == 0 {
|
||||
return fulfilled, true
|
||||
}
|
||||
last := at.hostTrace[0:]
|
||||
if len(at.hostTrace)-count >= 0 {
|
||||
last = at.hostTrace[len(at.hostTrace)-count:]
|
||||
}
|
||||
host := at.hostTrace[len(at.hostTrace)-1]
|
||||
for _, h := range last {
|
||||
if h != host {
|
||||
return fulfilled, false
|
||||
}
|
||||
}
|
||||
return fulfilled, true
|
||||
}
|
||||
|
||||
func checkAffinityFailed(tracker affinityTracker, err string) {
|
||||
Logf("%v", tracker.hostTrace)
|
||||
Failf(err)
|
||||
}
|
||||
|
||||
// CheckAffinity function tests whether the service affinity works as expected.
|
||||
// If affinity is expected and transitionState is true, the test will
|
||||
// return true once affinityConfirmCount number of same response observed in a
|
||||
// row. If affinity is not expected, the test will keep observe until different
|
||||
// responses observed. The function will return false only when no expected
|
||||
// responses observed before timeout. If transitionState is false, the test will
|
||||
// fail once different host is given if shouldHold is true.
|
||||
func CheckAffinity(jig *ServiceTestJig, execPod *v1.Pod, targetIp string, targetPort int, shouldHold, transitionState bool) bool {
|
||||
targetIpPort := net.JoinHostPort(targetIp, strconv.Itoa(targetPort))
|
||||
cmd := fmt.Sprintf(`wget -qO- http://%s/ -T 2`, targetIpPort)
|
||||
timeout := ServiceTestTimeout
|
||||
if execPod == nil {
|
||||
timeout = LoadBalancerPollTimeout
|
||||
}
|
||||
var tracker affinityTracker
|
||||
if pollErr := wait.PollImmediate(Poll, timeout, func() (bool, error) {
|
||||
if execPod != nil {
|
||||
if stdout, err := RunHostCmd(execPod.Namespace, execPod.Name, cmd); err != nil {
|
||||
Logf("Failed to get response from %s. Retry until timeout", targetIpPort)
|
||||
return false, nil
|
||||
} else {
|
||||
tracker.recordHost(stdout)
|
||||
}
|
||||
} else {
|
||||
rawResponse := jig.GetHTTPContent(targetIp, targetPort, timeout, "")
|
||||
tracker.recordHost(rawResponse.String())
|
||||
}
|
||||
trackerFulfilled, affinityHolds := tracker.checkHostTrace(AffinityConfirmCount)
|
||||
if !shouldHold && !affinityHolds {
|
||||
return true, nil
|
||||
}
|
||||
if shouldHold {
|
||||
if !transitionState && !affinityHolds {
|
||||
return true, fmt.Errorf("Affintity should hold but didn't.")
|
||||
}
|
||||
if trackerFulfilled && affinityHolds {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}); pollErr != nil {
|
||||
trackerFulfilled, _ := tracker.checkHostTrace(AffinityConfirmCount)
|
||||
if pollErr != wait.ErrWaitTimeout {
|
||||
checkAffinityFailed(tracker, pollErr.Error())
|
||||
return false
|
||||
} else {
|
||||
if !trackerFulfilled {
|
||||
checkAffinityFailed(tracker, fmt.Sprintf("Connection to %s timed out or not enough responses.", targetIpPort))
|
||||
}
|
||||
if shouldHold {
|
||||
checkAffinityFailed(tracker, "Affintity should hold but didn't.")
|
||||
} else {
|
||||
checkAffinityFailed(tracker, "Affintity shouldn't hold but did.")
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
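Note (not part of the vendored diff): StartServeHostnameService now accepts the full Service spec from the caller, and StopServeHostnameService no longer needs the internal clientset. A hedged sketch of the updated pair; the service name and port are illustrative:

func exampleServeHostname(c clientset.Interface, internalClient internalclientset.Interface, ns string) error {
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "serve-hostname"},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{{
				Port:       80,
				TargetPort: intstr.FromInt(9376),
				Protocol:   v1.ProtocolTCP,
			}},
			Selector: map[string]string{"name": "serve-hostname"},
		},
	}
	podNames, serviceIP, err := StartServeHostnameService(c, internalClient, svc, ns, 3)
	if err != nil {
		return err
	}
	Logf("%d pods serving hostnames behind %s", len(podNames), serviceIP)
	return StopServeHostnameService(c, ns, svc.Name)
}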
44
vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go
generated
vendored
@ -49,7 +49,12 @@ type TestContextType struct {
|
||||
RepoRoot string
|
||||
DockershimCheckpointDir string
|
||||
|
||||
Provider string
|
||||
// Provider identifies the infrastructure provider (gce, gke, aws)
|
||||
Provider string
|
||||
|
||||
// Tooling is the tooling in use (e.g. kops, gke). Provider is the cloud provider and might not uniquely identify the tooling.
|
||||
Tooling string
|
||||
|
||||
CloudConfig CloudConfig
|
||||
KubectlPath string
|
||||
OutputDir string
|
||||
@ -104,14 +109,16 @@ type TestContextType struct {
|
||||
DisableLogDump bool
|
||||
// Path to the GCS artifacts directory to dump logs from nodes. Logexporter gets enabled if this is non-empty.
|
||||
LogexporterGCSPath string
|
||||
// If the garbage collector is enabled in the kube-apiserver and kube-controller-manager.
|
||||
GarbageCollectorEnabled bool
|
||||
// featureGates is a map of feature names to bools that enable or disable alpha/experimental features.
|
||||
FeatureGates map[string]bool
|
||||
// Node e2e specific test context
|
||||
NodeTestContextType
|
||||
// Storage e2e specific test context
|
||||
StorageTestContextType
|
||||
// Monitoring solution that is used in current cluster.
|
||||
ClusterMonitoringMode string
|
||||
// Separate Prometheus monitoring deployed in cluster
|
||||
EnablePrometheusMonitoring bool
|
||||
|
||||
// Indicates what path the kubernetes-anywhere is installed on
|
||||
KubernetesAnywherePath string
|
||||
@ -155,12 +162,21 @@ type NodeTestContextType struct {
	SystemSpecName string
}

// StorageTestContextType contains the shared settings for storage e2e tests.
type StorageTestContextType struct {
	// CSIImageVersion overrides the builtin stable version numbers if set.
	CSIImageVersion string
	// CSIImageRegistry defines the image registry hosting the CSI container images.
	CSIImageRegistry string
}

type CloudConfig struct {
	ApiEndpoint       string
	ProjectID         string
	Zone              string // for multizone tests, arbitrarily chosen zone
	Region            string
	MultiZone         bool
	MultiMaster       bool
	Cluster           string
	MasterName        string
	NodeInstanceGroup string // comma-delimited list of groups' names
@ -208,7 +224,7 @@ func RegisterCommonFlags() {
	flag.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
	flag.Var(utilflag.NewMapStringBool(&TestContext.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
	flag.StringVar(&TestContext.Viper, "viper-config", "e2e", "The name of the viper config i.e. 'e2e' will read values from 'e2e.json' locally. All e2e parameters are meant to be configurable by viper.")
	flag.StringVar(&TestContext.ContainerRuntime, "container-runtime", "docker", "The container runtime of cluster VM instances (docker/rkt/remote).")
	flag.StringVar(&TestContext.ContainerRuntime, "container-runtime", "docker", "The container runtime of cluster VM instances (docker/remote).")
	flag.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "unix:///var/run/dockershim.sock", "The container runtime endpoint of cluster VM instances.")
	flag.StringVar(&TestContext.ContainerRuntimeProcessName, "container-runtime-process-name", "dockerd", "The name of the container runtime process.")
	flag.StringVar(&TestContext.ContainerRuntimePidFile, "container-runtime-pid-file", "/var/run/docker.pid", "The pid file of the container runtime.")
@ -230,12 +246,14 @@ func RegisterClusterFlags() {
	flag.StringVar(&TestContext.CertDir, "cert-dir", "", "Path to the directory containing the certs. Default is empty, which doesn't use certs.")
	flag.StringVar(&TestContext.RepoRoot, "repo-root", "../../", "Root directory of kubernetes repository, for finding test files.")
	flag.StringVar(&TestContext.Provider, "provider", "", "The name of the Kubernetes provider (gce, gke, local, etc.)")
	flag.StringVar(&TestContext.Tooling, "tooling", "", "The tooling in use (kops, gke, etc.)")
	flag.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")
	flag.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.")
	flag.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.")
	flag.StringVar(&TestContext.MasterOSDistro, "master-os-distro", "debian", "The OS distribution of cluster master (debian, trusty, or coreos).")
	flag.StringVar(&TestContext.NodeOSDistro, "node-os-distro", "debian", "The OS distribution of cluster VM instances (debian, trusty, or coreos).")
	flag.StringVar(&TestContext.ClusterMonitoringMode, "cluster-monitoring-mode", "influxdb", "The monitoring solution that is used in the cluster.")
	flag.StringVar(&TestContext.ClusterMonitoringMode, "cluster-monitoring-mode", "standalone", "The monitoring solution that is used in the cluster.")
	flag.BoolVar(&TestContext.EnablePrometheusMonitoring, "prometheus-monitoring", false, "Separate Prometheus monitoring deployed in cluster.")

	// TODO: Flags per provider? Rename gce-project/gce-zone?
	cloudConfig := &TestContext.CloudConfig
@ -245,6 +263,7 @@ func RegisterClusterFlags() {
	flag.StringVar(&cloudConfig.Zone, "gce-zone", "", "GCE zone being used, if applicable")
	flag.StringVar(&cloudConfig.Region, "gce-region", "", "GCE region being used, if applicable")
	flag.BoolVar(&cloudConfig.MultiZone, "gce-multizone", false, "If true, start GCE cloud provider with multizone support.")
	flag.BoolVar(&cloudConfig.MultiMaster, "gce-multimaster", false, "If true, the underlying GCE/GKE cluster is assumed to be multi-master.")
	flag.StringVar(&cloudConfig.Cluster, "gke-cluster", "", "GKE name of cluster being used, if applicable")
	flag.StringVar(&cloudConfig.NodeInstanceGroup, "node-instance-group", "", "Name of the managed instance group for nodes. Valid only for gce, gke or aws. If there is more than one group: comma separated list of groups.")
	flag.StringVar(&cloudConfig.Network, "network", "e2e", "The cloud provider network for this e2e cluster.")
@ -257,7 +276,7 @@ func RegisterClusterFlags() {
	flag.StringVar(&cloudConfig.ConfigFile, "cloud-config-file", "", "Cloud config file. Only required if provider is azure.")
	flag.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used.")
	flag.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
	flag.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 4*time.Hour, "Timeout for waiting for all nodes to be schedulable.")
	flag.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.")
	flag.StringVar(&TestContext.UpgradeTarget, "upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
	flag.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
	flag.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
@ -265,7 +284,6 @@ func RegisterClusterFlags() {
	flag.StringVar(&TestContext.IngressUpgradeImage, "ingress-upgrade-image", "", "Image to upgrade to if doing an upgrade test for ingress.")
	flag.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.")
	flag.BoolVar(&TestContext.CleanStart, "clean-start", false, "If true, purge all namespaces except default and system before running tests. This serves to Cleanup test namespaces from failed/interrupted e2e runs in a long-lived cluster.")
	flag.BoolVar(&TestContext.GarbageCollectorEnabled, "garbage-collector-enabled", true, "Set to true if the garbage collector is enabled in the kube-apiserver and kube-controller-manager, then some tests will rely on the garbage collector to delete dependent resources.")
}

// Register flags specific to the node e2e test suite.
@ -275,7 +293,7 @@ func RegisterNodeFlags() {
	flag.StringVar(&TestContext.NodeName, "node-name", "", "Name of the node to run tests on.")
	// TODO(random-liu): Move kubelet start logic out of the test.
	// TODO(random-liu): Move log fetch logic out of the test.
	// There are different ways to start kubelet (systemd, initd, docker, rkt, manually started etc.)
	// There are different ways to start kubelet (systemd, initd, docker, manually started etc.)
	// and manage logs (journald, upstart etc.).
	// For different situations we need to mount different things into the container and run different commands.
	// It is hard and unnecessary to deal with the complexity inside the test suite.
@ -285,6 +303,11 @@ func RegisterNodeFlags() {
	flag.StringVar(&TestContext.SystemSpecName, "system-spec-name", "", "The name of the system spec (e.g., gke) that's used in the node e2e test. The system specs are in test/e2e_node/system/specs/. This is used by the test framework to determine which tests to run for validating the system requirements.")
}

func RegisterStorageFlags() {
	flag.StringVar(&TestContext.CSIImageVersion, "csiImageVersion", "", "overrides the default tag used for hostpathplugin/csi-attacher/csi-provisioner/driver-registrar images")
	flag.StringVar(&TestContext.CSIImageRegistry, "csiImageRegistry", "quay.io/k8scsi", "overrides the default repository used for hostpathplugin/csi-attacher/csi-provisioner/driver-registrar images")
}

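RegisterStorageFlags above wires the two new StorageTestContextType fields to ordinary flags, so a test run can override the CSI sidecar images with -csiImageRegistry and -csiImageVersion. The sketch below shows one plausible way a test could fold the pair into a full image reference; the csiImageRef helper, the default tag, and the registry value are illustrative assumptions, not part of the vendored framework.

package main

import (
	"flag"
	"fmt"
)

// Hypothetical stand-ins for the framework's TestContext storage fields.
var (
	csiImageVersion  string
	csiImageRegistry string
)

// registerStorageFlags mirrors the shape of RegisterStorageFlags in the hunk above.
func registerStorageFlags(fs *flag.FlagSet) {
	fs.StringVar(&csiImageVersion, "csiImageVersion", "", "overrides the default image tag")
	fs.StringVar(&csiImageRegistry, "csiImageRegistry", "quay.io/k8scsi", "overrides the default image repository")
}

// csiImageRef combines registry, image name, and tag; the version flag, when
// set, takes precedence over the caller-supplied default tag.
func csiImageRef(image, defaultTag string) string {
	tag := defaultTag
	if csiImageVersion != "" {
		tag = csiImageVersion
	}
	return fmt.Sprintf("%s/%s:%s", csiImageRegistry, image, tag)
}

func main() {
	fs := flag.NewFlagSet("storage-flags-demo", flag.ExitOnError)
	registerStorageFlags(fs)
	// Simulate passing the flags on the test command line.
	_ = fs.Parse([]string{"-csiImageRegistry=example.registry.local/csi", "-csiImageVersion=canary"})
	fmt.Println(csiImageRef("hostpathplugin", "v0.2.0"))
	// Prints: example.registry.local/csi/hostpathplugin:canary
}
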
// ViperizeFlags sets up all flag and config processing. Future configuration info should be added to viper, not to flags.
func ViperizeFlags() {

@ -293,6 +316,7 @@ func ViperizeFlags() {
	// since go test 'flag's are sort of incompatible w/ flag, glog, etc.
	RegisterCommonFlags()
	RegisterClusterFlags()
	RegisterStorageFlags()
	flag.Parse()

	// Part 2: Set Viper provided flags.
@ -363,4 +387,8 @@ func AfterReadingAllFlags(t *TestContextType) {
		t.Host = defaultHost
	}
}
	// Allow 1% of nodes to be unready (statistically) - relevant for large clusters.
	if t.AllowedNotReadyNodes == 0 {
		t.AllowedNotReadyNodes = t.CloudConfig.NumNodes / 100
	}
}

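To make the AfterReadingAllFlags addition concrete: with integer division, a 300-node cluster ends up with AllowedNotReadyNodes = 300/100 = 3, while any cluster under 100 nodes still tolerates zero not-ready nodes unless the value is set explicitly.
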
477
vendor/k8s.io/kubernetes/test/e2e/framework/util.go
generated
vendored
File diff suppressed because it is too large
33
vendor/k8s.io/kubernetes/test/e2e/framework/volume_util.go
generated
vendored
@ -67,6 +67,10 @@ const (

	// Waiting period for volume server (Ceph, ...) to initialize itself.
	VolumeServerPodStartupSleep = 20 * time.Second

	// Waiting period for pod to be cleaned up and unmount its volumes so we
	// don't tear down containers with NFS/Ceph/Gluster server too early.
	PodCleanupTimeout = 20 * time.Second
)

// Configuration of one test. The test consists of:
@ -182,7 +186,7 @@ func NewISCSIServer(cs clientset.Interface, namespace string) (config VolumeTest
}

// CephRBD-specific wrapper for CreateStorageServer.
func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, secret *v1.Secret, ip string) {
	config = VolumeTestConfig{
		Namespace: namespace,
		Prefix:    "rbd",
@ -201,7 +205,28 @@ func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestCo
	Logf("sleeping a bit to give ceph server time to initialize")
	time.Sleep(VolumeServerPodStartupSleep)

	return config, pod, ip
	// create secrets for the server
	secret = &v1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Secret",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: config.Prefix + "-secret",
		},
		Data: map[string][]byte{
			// from test/images/volumes-tester/rbd/keyring
			"key": []byte("AQDRrKNVbEevChAAEmRC+pW/KBVHxa0w/POILA=="),
		},
		Type: "kubernetes.io/rbd",
	}

	secret, err := cs.CoreV1().Secrets(config.Namespace).Create(secret)
	if err != nil {
		Failf("Failed to create secrets for Ceph RBD: %v", err)
	}

	return config, pod, secret, ip
}

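With the widened NewRBDServer signature above, callers now receive the generated RBD secret alongside the config, server pod, and IP, and become responsible for deleting it when the test is done. A minimal caller-side sketch follows, assuming the usual e2e framework imports; the setUpRBD helper and its cleanup closure are illustrative, not part of the framework.

package rbdexample

import (
	"k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// setUpRBD starts an RBD server for a test and returns everything NewRBDServer
// now hands back, plus a cleanup closure that removes the generated secret.
func setUpRBD(cs clientset.Interface, namespace string) (framework.VolumeTestConfig, *v1.Pod, *v1.Secret, string, func()) {
	config, serverPod, secret, serverIP := framework.NewRBDServer(cs, namespace)

	// cleanup deletes the secret created by NewRBDServer; the server pod itself
	// is torn down separately (for example via VolumeTestCleanup).
	cleanup := func() {
		if err := cs.CoreV1().Secrets(namespace).Delete(secret.Name, nil); err != nil {
			framework.Logf("Failed to delete RBD secret %q: %v", secret.Name, err)
		}
	}
	return config, serverPod, secret, serverIP, cleanup
}
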
// Wrapper for StartVolumeServer(). A storage server config is passed in, and a pod pointer
@ -351,8 +376,8 @@ func VolumeTestCleanup(f *Framework, config VolumeTestConfig) {
	}
	// See issue #24100.
	// Prevent umount errors by making sure the client pod exits cleanly *before* the volume server pod exits.
	By("sleeping a bit so client can stop and unmount")
	time.Sleep(20 * time.Second)
	By("sleeping a bit so kubelet can unmount and detach the volume")
	time.Sleep(PodCleanupTimeout)

	err = podClient.Delete(config.Prefix+"-server", nil)
	if err != nil {