Migrate from snapClient.VolumesnapshotV1alpha1Client to snapClient.SnapshotV1alpha1Client and also update kube dependency

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
This commit is contained in:
Humble Chirammal
2019-06-24 14:38:09 +05:30
committed by mergify[bot]
parent 3bc6771df8
commit 22ff5c0911
1031 changed files with 34242 additions and 177906 deletions

View File

@ -14,16 +14,17 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
package auth
import (
"k8s.io/klog"
"fmt"
"sync"
"time"
"github.com/onsi/ginkgo"
"github.com/pkg/errors"
authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
@ -36,13 +37,19 @@ const (
policyCachePollTimeout = 5 * time.Second
)
type bindingsGetter interface {
v1beta1rbac.RoleBindingsGetter
v1beta1rbac.ClusterRoleBindingsGetter
v1beta1rbac.ClusterRolesGetter
}
// WaitForAuthorizationUpdate checks if the given user can perform the named verb and action.
// If policyCachePollTimeout is reached without the expected condition matching, an error is returned
func WaitForAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviewsGetter, user, namespace, verb string, resource schema.GroupResource, allowed bool) error {
return WaitForNamedAuthorizationUpdate(c, user, namespace, verb, "", resource, allowed)
}
// WaitForAuthorizationUpdate checks if the given user can perform the named verb and action on the named resource.
// WaitForNamedAuthorizationUpdate checks if the given user can perform the named verb and action on the named resource.
// If policyCachePollTimeout is reached without the expected condition matching, an error is returned
func WaitForNamedAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviewsGetter, user, namespace, verb, resourceName string, resource schema.GroupResource, allowed bool) error {
review := &authorizationv1beta1.SubjectAccessReview{
@ -57,15 +64,9 @@ func WaitForNamedAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviews
User: user,
},
}
err := wait.Poll(policyCachePollInterval, policyCachePollTimeout, func() (bool, error) {
response, err := c.SubjectAccessReviews().Create(review)
// GKE doesn't enable the SAR endpoint. Without this endpoint, we cannot determine if the policy engine
// has adjusted as expected. In this case, simply wait one second and hope it's up to date
if apierrors.IsNotFound(err) {
klog.Info("SubjectAccessReview endpoint is missing")
time.Sleep(1 * time.Second)
return true, nil
}
if err != nil {
return false, err
}
@ -77,8 +78,13 @@ func WaitForNamedAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviews
return err
}
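
As a hedged illustration of the call pattern above (not part of this diff): a test that has just granted a user read access on pods might block until the authorizer catches up. The helper name and verb are assumptions.

package example

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework/auth"
)

// waitForPodReadAccess polls SubjectAccessReview until the user may "get"
// pods in ns, or the package's policyCachePollTimeout elapses.
// (Illustrative caller; not part of this commit.)
func waitForPodReadAccess(c clientset.Interface, user, ns string) error {
	return auth.WaitForAuthorizationUpdate(
		c.AuthorizationV1beta1(), // satisfies SubjectAccessReviewsGetter
		user, ns, "get",
		schema.GroupResource{Resource: "pods"},
		true, // expect the action to be allowed
	)
}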
// BindClusterRole binds the cluster role at the cluster scope
func BindClusterRole(c v1beta1rbac.ClusterRoleBindingsGetter, clusterRole, ns string, subjects ...rbacv1beta1.Subject) {
// BindClusterRole binds the cluster role at the cluster scope. If RBAC is not enabled, nil
// is returned with no action.
func BindClusterRole(c bindingsGetter, clusterRole, ns string, subjects ...rbacv1beta1.Subject) error {
if !IsRBACEnabled(c) {
return nil
}
// Since the namespace names are unique, we can leave this lying around so we don't have to race any caches
_, err := c.ClusterRoleBindings().Create(&rbacv1beta1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
@ -92,23 +98,30 @@ func BindClusterRole(c v1beta1rbac.ClusterRoleBindingsGetter, clusterRole, ns st
Subjects: subjects,
})
// if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
if err != nil {
klog.Errorf("Error binding clusterrole/%s for %q for %v\n", clusterRole, ns, subjects)
return errors.Wrapf(err, "binding clusterrole/%s for %q for %v", clusterRole, ns, subjects)
}
return nil
}
// BindClusterRoleInNamespace binds the cluster role at the namespace scope
func BindClusterRoleInNamespace(c v1beta1rbac.RoleBindingsGetter, clusterRole, ns string, subjects ...rbacv1beta1.Subject) {
bindInNamespace(c, "ClusterRole", clusterRole, ns, subjects...)
// BindClusterRoleInNamespace binds the cluster role at the namespace scope. If RBAC is not enabled, nil
// is returned with no action.
func BindClusterRoleInNamespace(c bindingsGetter, clusterRole, ns string, subjects ...rbacv1beta1.Subject) error {
return bindInNamespace(c, "ClusterRole", clusterRole, ns, subjects...)
}
// BindRoleInNamespace binds the role at the namespace scope
func BindRoleInNamespace(c v1beta1rbac.RoleBindingsGetter, role, ns string, subjects ...rbacv1beta1.Subject) {
bindInNamespace(c, "Role", role, ns, subjects...)
// BindRoleInNamespace binds the role at the namespace scope. If RBAC is not enabled, nil
// is returned with no action.
func BindRoleInNamespace(c bindingsGetter, role, ns string, subjects ...rbacv1beta1.Subject) error {
return bindInNamespace(c, "Role", role, ns, subjects...)
}
func bindInNamespace(c v1beta1rbac.RoleBindingsGetter, roleType, role, ns string, subjects ...rbacv1beta1.Subject) {
func bindInNamespace(c bindingsGetter, roleType, role, ns string, subjects ...rbacv1beta1.Subject) error {
if !IsRBACEnabled(c) {
return nil
}
// Since the namespace names are unique, we can leave this lying around so we don't have to race any caches
_, err := c.RoleBindings(ns).Create(&rbacv1beta1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
@ -122,10 +135,11 @@ func bindInNamespace(c v1beta1rbac.RoleBindingsGetter, roleType, role, ns string
Subjects: subjects,
})
// if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
if err != nil {
klog.Errorf("Error binding %s/%s into %q for %v\n", roleType, role, ns, subjects)
return errors.Wrapf(err, "binding %s/%s into %q for %v", roleType, role, ns, subjects)
}
return nil
}
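
A minimal sketch of the new error-returning contract (role name and subject are illustrative): callers now handle the error themselves instead of the helper logging and continuing.

package example

import (
	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework/auth"
)

// bindEditForServiceAccount is an illustrative caller: RbacV1beta1()
// satisfies the unexported bindingsGetter interface, and a nil error also
// covers the RBAC-disabled case.
func bindEditForServiceAccount(c clientset.Interface, ns, sa string) error {
	return auth.BindClusterRoleInNamespace(c.RbacV1beta1(), "edit", ns,
		rbacv1beta1.Subject{
			Kind:      rbacv1beta1.ServiceAccountKind,
			Namespace: ns,
			Name:      sa,
		})
}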
var (
@ -133,19 +147,42 @@ var (
isRBACEnabled bool
)
func IsRBACEnabled(f *Framework) bool {
// IsRBACEnabled returns true if RBAC is enabled. Otherwise false.
func IsRBACEnabled(crGetter v1beta1rbac.ClusterRolesGetter) bool {
isRBACEnabledOnce.Do(func() {
crs, err := f.ClientSet.RbacV1().ClusterRoles().List(metav1.ListOptions{})
crs, err := crGetter.ClusterRoles().List(metav1.ListOptions{})
if err != nil {
Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err)
logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err)
isRBACEnabled = false
} else if crs == nil || len(crs.Items) == 0 {
Logf("No ClusterRoles found; assuming RBAC is disabled.")
logf("No ClusterRoles found; assuming RBAC is disabled.")
isRBACEnabled = false
} else {
Logf("Found ClusterRoles; assuming RBAC is enabled.")
logf("Found ClusterRoles; assuming RBAC is enabled.")
isRBACEnabled = true
}
})
return isRBACEnabled
}
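
A short sketch of the usual gate on this detector; the setup body is an assumption.

package example

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework/auth"
)

// setupRBACIfEnabled sketches the typical gate: the first call lists
// ClusterRoles once (guarded by sync.Once) and later calls reuse the
// cached result. (Illustrative; not part of this commit.)
func setupRBACIfEnabled(c clientset.Interface, ns string) {
	if !auth.IsRBACEnabled(c.RbacV1beta1()) {
		return // nothing to bind on clusters without RBAC
	}
	// ... create the roles and bindings the test needs ...
}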
// logf logs INFO lines to the GinkgoWriter.
// TODO: Log functions like these should be put into their own package,
// see: https://github.com/kubernetes/kubernetes/issues/76728
func logf(format string, args ...interface{}) {
log("INFO", format, args...)
}
// log prints formatted log messages to the global GinkgoWriter.
// TODO: Log functions like these should be put into their own package,
// see: https://github.com/kubernetes/kubernetes/issues/76728
func log(level string, format string, args ...interface{}) {
fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
}
// nowStamp returns the current time formatted for placement in the logs (time.StampMilli).
// TODO: If only used for logging, this should be put into a logging package,
// see: https://github.com/kubernetes/kubernetes/issues/76728
func nowStamp() string {
return time.Now().Format(time.StampMilli)
}

View File

@ -18,6 +18,7 @@ package framework
import "sync"
// CleanupActionHandle is an integer pointer type for handling cleanup action
type CleanupActionHandle *int
var cleanupActionsLock sync.Mutex

View File

@ -31,14 +31,15 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/api/legacyscheme"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
)
// LoadFromManifests loads .yaml or .json manifest files and returns
// all items that it finds in them. It supports all items for which
// there is a factory registered in Factories and .yaml files with
// there is a factory registered in factories and .yaml files with
// multiple items separated by "---". Files are accessed via the
// "testfiles" package, which means they can come from a file system
// or be built into the binary.
@ -54,17 +55,17 @@ func (f *Framework) LoadFromManifests(files ...string) ([]interface{}, error) {
err := visitManifests(func(data []byte) error {
// Ignore any additional fields for now, just determine what we have.
var what What
if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), data, &what); err != nil {
if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), data, &what); err != nil {
return errors.Wrap(err, "decode TypeMeta")
}
factory := Factories[what]
factory := factories[what]
if factory == nil {
return errors.Errorf("item of type %+v not supported", what)
}
object := factory.New()
if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), data, object); err != nil {
if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), data, object); err != nil {
return errors.Wrapf(err, "decode %+v", what)
}
items = append(items, object)
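
A hedged sketch of the intended LoadFromManifests / PatchItems / CreateItems sequence; the manifest path is illustrative, not taken from this diff.

package example

import "k8s.io/kubernetes/test/e2e/framework"

// deployFromManifests shows the call order: load, patch for the current
// namespace, create, and hand back the returned cleanup function.
func deployFromManifests(f *framework.Framework) (func(), error) {
	items, err := f.LoadFromManifests("testing-manifests/example/driver.yaml")
	if err != nil {
		return nil, err
	}
	if err := f.PatchItems(items...); err != nil {
		return nil, err
	}
	return f.CreateItems(items...)
}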
@ -114,7 +115,7 @@ func visitManifests(cb func([]byte) error, files ...string) error {
func (f *Framework) PatchItems(items ...interface{}) error {
for _, item := range items {
// Uncomment when debugging the loading and patching of items.
// Logf("patching original content of %T:\n%s", item, PrettyPrint(item))
// e2elog.Logf("patching original content of %T:\n%s", item, PrettyPrint(item))
if err := f.patchItemRecursively(item); err != nil {
return err
}
@ -153,7 +154,7 @@ func (f *Framework) CreateItems(items ...interface{}) (func(), error) {
// to non-namespaced items.
for _, destructor := range destructors {
if err := destructor(); err != nil && !apierrs.IsNotFound(err) {
Logf("deleting failed: %s", err)
e2elog.Logf("deleting failed: %s", err)
}
}
}
@ -166,19 +167,19 @@ func (f *Framework) CreateItems(items ...interface{}) (func(), error) {
description := DescribeItem(item)
// Uncomment this line to get a full dump of the entire item.
// description = fmt.Sprintf("%s:\n%s", description, PrettyPrint(item))
Logf("creating %s", description)
for _, factory := range Factories {
e2elog.Logf("creating %s", description)
for _, factory := range factories {
destructor, err := factory.Create(f, item)
if destructor != nil {
destructors = append(destructors, func() error {
Logf("deleting %s", description)
e2elog.Logf("deleting %s", description)
return destructor()
})
}
if err == nil {
done = true
break
} else if errors.Cause(err) != ItemNotSupported {
} else if errors.Cause(err) != errorItemNotSupported {
result = err
break
}
@ -224,18 +225,22 @@ type What struct {
Kind string `json:"kind"`
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new What.
func (in *What) DeepCopy() *What {
return &What{Kind: in.Kind}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out.
func (in *What) DeepCopyInto(out *What) {
out.Kind = in.Kind
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *What) DeepCopyObject() runtime.Object {
return &What{Kind: in.Kind}
}
// GetObjectKind returns the ObjectKind schema
func (in *What) GetObjectKind() schema.ObjectKind {
return nil
}
@ -250,7 +255,7 @@ type ItemFactory interface {
// Create is responsible for creating the item. It returns an
// error or a cleanup function for the created item.
// If the item is of an unsupported type, it must return
// an error that has ItemNotSupported as cause.
// an error that has errorItemNotSupported as cause.
Create(f *Framework, item interface{}) (func() error, error)
}
@ -266,11 +271,11 @@ func DescribeItem(item interface{}) string {
return fmt.Sprintf("%T: %s", item, item)
}
// ItemNotSupported is the error that Create methods
// errorItemNotSupported is the error that Create methods
// must return or wrap when they don't support the given item.
var ItemNotSupported = errors.New("not supported")
var errorItemNotSupported = errors.New("not supported")
var Factories = map[What]ItemFactory{
var factories = map[What]ItemFactory{
{"ClusterRole"}: &clusterRoleFactory{},
{"ClusterRoleBinding"}: &clusterRoleBindingFactory{},
{"DaemonSet"}: &daemonSetFactory{},
@ -372,7 +377,7 @@ func (f *serviceAccountFactory) New() runtime.Object {
func (*serviceAccountFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*v1.ServiceAccount)
if !ok {
return nil, ItemNotSupported
return nil, errorItemNotSupported
}
client := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.GetName())
if _, err := client.Create(item); err != nil {
@ -392,10 +397,10 @@ func (f *clusterRoleFactory) New() runtime.Object {
func (*clusterRoleFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*rbac.ClusterRole)
if !ok {
return nil, ItemNotSupported
return nil, errorItemNotSupported
}
Logf("Define cluster role %v", item.GetName())
e2elog.Logf("Define cluster role %v", item.GetName())
client := f.ClientSet.RbacV1().ClusterRoles()
if _, err := client.Create(item); err != nil {
return nil, errors.Wrap(err, "create ClusterRole")
@ -414,7 +419,7 @@ func (f *clusterRoleBindingFactory) New() runtime.Object {
func (*clusterRoleBindingFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*rbac.ClusterRoleBinding)
if !ok {
return nil, ItemNotSupported
return nil, errorItemNotSupported
}
client := f.ClientSet.RbacV1().ClusterRoleBindings()
@ -435,7 +440,7 @@ func (f *roleFactory) New() runtime.Object {
func (*roleFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*rbac.Role)
if !ok {
return nil, ItemNotSupported
return nil, errorItemNotSupported
}
client := f.ClientSet.RbacV1().Roles(f.Namespace.GetName())
@ -456,7 +461,7 @@ func (f *roleBindingFactory) New() runtime.Object {
func (*roleBindingFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*rbac.RoleBinding)
if !ok {
return nil, ItemNotSupported
return nil, errorItemNotSupported
}
client := f.ClientSet.RbacV1().RoleBindings(f.Namespace.GetName())
@ -477,7 +482,7 @@ func (f *serviceFactory) New() runtime.Object {
func (*serviceFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*v1.Service)
if !ok {
return nil, ItemNotSupported
return nil, errorItemNotSupported
}
client := f.ClientSet.CoreV1().Services(f.Namespace.GetName())
@ -498,7 +503,7 @@ func (f *statefulSetFactory) New() runtime.Object {
func (*statefulSetFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*apps.StatefulSet)
if !ok {
return nil, ItemNotSupported
return nil, errorItemNotSupported
}
client := f.ClientSet.AppsV1().StatefulSets(f.Namespace.GetName())
@ -519,7 +524,7 @@ func (f *daemonSetFactory) New() runtime.Object {
func (*daemonSetFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*apps.DaemonSet)
if !ok {
return nil, ItemNotSupported
return nil, errorItemNotSupported
}
client := f.ClientSet.AppsV1().DaemonSets(f.Namespace.GetName())
@ -540,7 +545,7 @@ func (f *storageClassFactory) New() runtime.Object {
func (*storageClassFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*storage.StorageClass)
if !ok {
return nil, ItemNotSupported
return nil, errorItemNotSupported
}
client := f.ClientSet.StorageV1().StorageClasses()
@ -561,7 +566,7 @@ func (f *secretFactory) New() runtime.Object {
func (*secretFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*v1.Secret)
if !ok {
return nil, ItemNotSupported
return nil, errorItemNotSupported
}
client := f.ClientSet.CoreV1().Secrets(f.Namespace.GetName())

View File

@ -1,309 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"fmt"
"time"
. "github.com/onsi/ginkgo"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
watchtools "k8s.io/client-go/tools/watch"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*apps.Deployment, error) {
return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)
}
// Waits for the deployment to clean up old replica sets.
func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
var oldRSs []*apps.ReplicaSet
var d *apps.Deployment
pollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
d = deployment
_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
if err != nil {
return false, err
}
return len(oldRSs) == desiredRSNum, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
logReplicaSetsOfDeployment(d, oldRSs, nil)
}
return pollErr
}
func logReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) {
testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, Logf)
}
func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
return testutils.WaitForObservedDeployment(c, ns, deploymentName, desiredGeneration)
}
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType apps.DeploymentConditionType) error {
return testutils.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, Logf, Poll, pollLongTimeout)
}
// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that the deployment revision and its new RS revision should be updated shortly most of the time, but an overwhelmed RS controller
// may take longer to relabel an RS.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
return testutils.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, Logf, Poll, pollLongTimeout)
}
func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType apps.DeploymentStrategyType) *apps.Deployment {
zero := int64(0)
return &apps.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Labels: podLabels,
},
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: podLabels},
Strategy: apps.DeploymentStrategy{
Type: strategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &zero,
Containers: []v1.Container{
{
Name: imageName,
Image: image,
},
},
},
},
},
}
}
// Waits for the deployment to complete, without checking that the rolling update strategy is unbroken.
// The rolling update strategy is used only during a rolling update, and can be violated in other situations,
// such as shortly after a scaling event or when the deployment has just been created.
func WaitForDeploymentComplete(c clientset.Interface, d *apps.Deployment) error {
return testutils.WaitForDeploymentComplete(c, d, Logf, Poll, pollLongTimeout)
}
// Waits for the deployment to complete, and checks that the rolling update strategy isn't broken at any time.
// The rolling update strategy should not be broken during a rolling update.
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *apps.Deployment) error {
return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, Logf, Poll, pollLongTimeout)
}
// WaitForDeploymentUpdatedReplicasGTE waits for the given deployment to be observed by the controller and to have at least minUpdatedReplicas updated replicas
func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
return testutils.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, minUpdatedReplicas, desiredGeneration, Poll, pollLongTimeout)
}
// WaitForDeploymentRollbackCleared waits until the given deployment has either started rolling back or no longer needs to roll back.
// Note that rollback should be cleared shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error {
return testutils.WaitForDeploymentRollbackCleared(c, ns, deploymentName, Poll, pollShortTimeout)
}
// WatchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time as
// old pods.
func WatchRecreateDeployment(c clientset.Interface, d *apps.Deployment) error {
if d.Spec.Strategy.Type != apps.RecreateDeploymentStrategyType {
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
}
w, err := c.AppsV1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
if err != nil {
return err
}
status := d.Status
condition := func(event watch.Event) (bool, error) {
d := event.Object.(*apps.Deployment)
status = d.Status
if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.AppsV1())
newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.AppsV1())
if err == nil && nerr == nil {
Logf("%+v", d)
logReplicaSetsOfDeployment(d, allOldRSs, newRS)
logPodsOfDeployment(c, d, append(allOldRSs, newRS))
}
return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
}
return *(d.Spec.Replicas) == d.Status.Replicas &&
*(d.Spec.Replicas) == d.Status.UpdatedReplicas &&
d.Generation <= d.Status.ObservedGeneration, nil
}
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, w, condition)
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
}
return err
}
func ScaleDeployment(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, scalesGetter, ns, name, size, wait, appsinternal.Kind("Deployment"), appsinternal.Resource("deployments"))
}
func RunDeployment(config testutils.DeploymentConfig) error {
By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = DumpNodeDebugInfo
config.ContainerDumpFunc = LogFailedContainers
return testutils.RunDeployment(config)
}
func logPodsOfDeployment(c clientset.Interface, deployment *apps.Deployment, rsList []*apps.ReplicaSet) {
testutils.LogPodsOfDeployment(c, deployment, rsList, Logf)
}
func WaitForDeploymentRevision(c clientset.Interface, d *apps.Deployment, targetRevision string) error {
err := wait.PollImmediate(Poll, pollLongTimeout, func() (bool, error) {
deployment, err := c.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
revision := deployment.Annotations[deploymentutil.RevisionAnnotation]
return revision == targetRevision, nil
})
if err != nil {
return fmt.Errorf("error waiting for revision to become %q for deployment %q: %v", targetRevision, d.Name, err)
}
return nil
}
// CheckDeploymentRevisionAndImage checks if the input deployment's and its new replica set's revision and image are as expected.
func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName, revision, image string) error {
return testutils.CheckDeploymentRevisionAndImage(c, ns, deploymentName, revision, image)
}
func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*apps.Deployment, error) {
deploymentSpec := MakeDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command)
deployment, err := client.AppsV1().Deployments(namespace).Create(deploymentSpec)
if err != nil {
return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err)
}
Logf("Waiting deployment %q to complete", deploymentSpec.Name)
err = WaitForDeploymentComplete(client, deployment)
if err != nil {
return nil, fmt.Errorf("deployment %q failed to complete: %v", deploymentSpec.Name, err)
}
return deployment, nil
}
// MakeDeployment creates a deployment definition based on the namespace. The deployment references the PVCs by
// name. A shell command can be supplied via the command argument to be run by the pod.
func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *apps.Deployment {
if len(command) == 0 {
command = "trap exit TERM; while true; do sleep 1; done"
}
zero := int64(0)
deploymentName := "deployment-" + string(uuid.NewUUID())
deploymentSpec := &apps.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Namespace: namespace,
},
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: podLabels,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &zero,
Containers: []v1.Container{
{
Name: "write-pod",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
SecurityContext: &v1.SecurityContext{
Privileged: &isPrivileged,
},
},
},
RestartPolicy: v1.RestartPolicyAlways,
},
},
},
}
var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
var volumes = make([]v1.Volume, len(pvclaims))
for index, pvclaim := range pvclaims {
volumename := fmt.Sprintf("volume%v", index+1)
volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
}
deploymentSpec.Spec.Template.Spec.Containers[0].VolumeMounts = volumeMounts
deploymentSpec.Spec.Template.Spec.Volumes = volumes
if nodeSelector != nil {
deploymentSpec.Spec.Template.Spec.NodeSelector = nodeSelector
}
return deploymentSpec
}
// GetPodsForDeployment gets pods for the given deployment
func GetPodsForDeployment(client clientset.Interface, deployment *apps.Deployment) (*v1.PodList, error) {
replicaSet, err := deploymentutil.GetNewReplicaSet(deployment, client.AppsV1())
if err != nil {
return nil, fmt.Errorf("Failed to get new replica set for deployment %q: %v", deployment.Name, err)
}
if replicaSet == nil {
return nil, fmt.Errorf("expected a new replica set for deployment %q, found none", deployment.Name)
}
podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
return client.CoreV1().Pods(namespace).List(options)
}
rsList := []*apps.ReplicaSet{replicaSet}
podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
if err != nil {
return nil, fmt.Errorf("Failed to list Pods of Deployment %q: %v", deployment.Name, err)
}
return podList, nil
}
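
For orientation, a minimal sketch of how the helpers in this file are typically driven (labels and the empty command are assumptions):

package example

import (
	"k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// runPVCWriter creates a one-replica deployment mounting the given claims,
// waits for it to complete, then fetches its pods. (Illustrative caller;
// not part of this commit.)
func runPVCWriter(c clientset.Interface, ns string, claims []*v1.PersistentVolumeClaim) (*v1.PodList, error) {
	labels := map[string]string{"app": "pvc-writer"}
	d, err := framework.CreateDeployment(c, 1, labels, nil, ns, claims, "")
	if err != nil {
		return nil, err
	}
	return framework.GetPodsForDeployment(c, d)
}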

View File

@ -24,21 +24,20 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/kubernetes/pkg/api/legacyscheme"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
. "github.com/onsi/gomega"
"github.com/onsi/gomega"
)
// ExecOptions passed to ExecWithOptions
type ExecOptions struct {
Command []string
Command []string
Namespace string
PodName string
ContainerName string
Stdin io.Reader
CaptureStdout bool
CaptureStderr bool
@ -50,7 +49,7 @@ type ExecOptions struct {
// returning stdout, stderr and error. `options` allows for
// additional parameters to be passed.
func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error) {
Logf("ExecWithOptions %+v", options)
e2elog.Logf("ExecWithOptions %+v", options)
config, err := LoadConfig()
ExpectNoError(err, "failed to load restclient config")
@ -70,7 +69,7 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error)
Stdout: options.CaptureStdout,
Stderr: options.CaptureStderr,
TTY: tty,
}, legacyscheme.ParameterCodec)
}, scheme.ParameterCodec)
var stdout, stderr bytes.Buffer
err = execute("POST", req.URL(), config, options.Stdin, &stdout, &stderr, tty)
@ -85,11 +84,10 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error)
// specified container and return stdout, stderr and error
func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName string, cmd ...string) (string, string, error) {
return f.ExecWithOptions(ExecOptions{
Command: cmd,
Namespace: f.Namespace.Name,
PodName: podName,
ContainerName: containerName,
Command: cmd,
Namespace: f.Namespace.Name,
PodName: podName,
ContainerName: containerName,
Stdin: nil,
CaptureStdout: true,
CaptureStderr: true,
@ -100,37 +98,40 @@ func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName
// ExecCommandInContainer executes a command in the specified container.
func (f *Framework) ExecCommandInContainer(podName, containerName string, cmd ...string) string {
stdout, stderr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
Logf("Exec stderr: %q", stderr)
e2elog.Logf("Exec stderr: %q", stderr)
ExpectNoError(err,
"failed to execute command in pod %v, container %v: %v",
podName, containerName, err)
return stdout
}
// ExecShellInContainer executes the specified command on the pod's container.
func (f *Framework) ExecShellInContainer(podName, containerName string, cmd string) string {
return f.ExecCommandInContainer(podName, containerName, "/bin/sh", "-c", cmd)
}
func (f *Framework) ExecCommandInPod(podName string, cmd ...string) string {
func (f *Framework) execCommandInPod(podName string, cmd ...string) string {
pod, err := f.PodClient().Get(podName, metav1.GetOptions{})
ExpectNoError(err, "failed to get pod")
Expect(pod.Spec.Containers).NotTo(BeEmpty())
ExpectNoError(err, "failed to get pod %v", podName)
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
return f.ExecCommandInContainer(podName, pod.Spec.Containers[0].Name, cmd...)
}
func (f *Framework) ExecCommandInPodWithFullOutput(podName string, cmd ...string) (string, string, error) {
func (f *Framework) execCommandInPodWithFullOutput(podName string, cmd ...string) (string, string, error) {
pod, err := f.PodClient().Get(podName, metav1.GetOptions{})
ExpectNoError(err, "failed to get pod")
Expect(pod.Spec.Containers).NotTo(BeEmpty())
ExpectNoError(err, "failed to get pod %v", podName)
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
return f.ExecCommandInContainerWithFullOutput(podName, pod.Spec.Containers[0].Name, cmd...)
}
// ExecShellInPod executes the specified command on the pod.
func (f *Framework) ExecShellInPod(podName string, cmd string) string {
return f.ExecCommandInPod(podName, "/bin/sh", "-c", cmd)
return f.execCommandInPod(podName, "/bin/sh", "-c", cmd)
}
// ExecShellInPodWithFullOutput executes the specified command on the Pod and returns stdout, stderr and error.
func (f *Framework) ExecShellInPodWithFullOutput(podName string, cmd string) (string, string, error) {
return f.ExecCommandInPodWithFullOutput(podName, "/bin/sh", "-c", cmd)
return f.execCommandInPodWithFullOutput(podName, "/bin/sh", "-c", cmd)
}
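
A brief sketch of the exec helpers above; the pod name and commands are assumptions. ExecShellInPod fails the test on error, while the WithFullOutput variant hands stdout, stderr and the error back to the caller.

package example

import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework"
)

// verifyMount is an illustrative caller of the exec helpers.
func verifyMount(f *framework.Framework, podName string) error {
	// Fails the test immediately on error:
	out := f.ExecShellInPod(podName, "mount | grep /mnt/volume1")
	fmt.Printf("mount output: %q\n", out)
	// Returns stdout, stderr and the error for the caller to handle:
	stdout, stderr, err := f.ExecShellInPodWithFullOutput(podName, "ls /mnt/volume1")
	if err != nil {
		return fmt.Errorf("ls failed: %v (stdout=%q, stderr=%q)", err, stdout, stderr)
	}
	return nil
}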
func execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error {

View File

@ -20,14 +20,18 @@ import (
"bytes"
"fmt"
"sync"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)
// FlakeReport is a struct for managing the flake report.
type FlakeReport struct {
lock sync.RWMutex
Flakes []string `json:"flakes"`
FlakeCount int `json:"flakeCount"`
}
// NewFlakeReport returns a new flake report.
func NewFlakeReport() *FlakeReport {
return &FlakeReport{
Flakes: []string{},
@ -55,19 +59,21 @@ func (f *FlakeReport) RecordFlakeIfError(err error, optionalDescription ...inter
if desc != "" {
msg = fmt.Sprintf("%v (Description: %v)", msg, desc)
}
Logf(msg)
e2elog.Logf(msg)
f.lock.Lock()
defer f.lock.Unlock()
f.Flakes = append(f.Flakes, msg)
f.FlakeCount++
}
// GetFlakeCount returns the flake count.
func (f *FlakeReport) GetFlakeCount() int {
f.lock.RLock()
defer f.lock.RUnlock()
return f.FlakeCount
}
// PrintHumanReadable returns a human-readable string of the flake report.
func (f *FlakeReport) PrintHumanReadable() string {
f.lock.RLock()
defer f.lock.RUnlock()
@ -80,12 +86,14 @@ func (f *FlakeReport) PrintHumanReadable() string {
return buf.String()
}
// PrintJSON returns the flake report summary in JSON format.
func (f *FlakeReport) PrintJSON() string {
f.lock.RLock()
defer f.lock.RUnlock()
return PrettyPrintJSON(f)
}
// SummaryKind returns the kind of the flake report summary.
func (f *FlakeReport) SummaryKind() string {
return "FlakeReport"
}
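
A short usage sketch for FlakeReport; the error source and description are assumptions.

package example

import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework"
)

// reportFlakes records non-fatal errors during a test and prints the
// summary at the end. (Illustrative; not part of this commit.)
func reportFlakes(errs []error) {
	report := framework.NewFlakeReport()
	for _, err := range errs {
		report.RecordFlakeIfError(err, "node restart check")
	}
	if report.GetFlakeCount() > 0 {
		fmt.Println(report.PrintHumanReadable())
	}
}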

View File

@ -42,21 +42,21 @@ import (
cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
scaleclient "k8s.io/client-go/scale"
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/metrics"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
maxKubectlExecRetries = 5
// DefaultNamespaceDeletionTimeout is timeout duration for waiting for a namespace deletion.
// TODO(mikedanese): reset this to 5 minutes once #47135 is resolved.
// ref https://github.com/kubernetes/kubernetes/issues/47135
DefaultNamespaceDeletionTimeout = 10 * time.Minute
@ -75,9 +75,7 @@ type Framework struct {
ClientSet clientset.Interface
KubemarkExternalClusterClientSet clientset.Interface
InternalClientset *internalclientset.Clientset
AggregatorClient *aggregatorclient.Clientset
DynamicClient dynamic.Interface
DynamicClient dynamic.Interface
ScalesGetter scaleclient.ScalesGetter
@ -106,7 +104,7 @@ type Framework struct {
cleanupHandle CleanupActionHandle
// configuration for framework's client
Options FrameworkOptions
Options Options
// Place where various additional data is stored during test run to be printed to ReportDir,
// or stdout if ReportDir is not set once test ends.
@ -116,29 +114,32 @@ type Framework struct {
clusterAutoscalerMetricsBeforeTest metrics.Collection
}
// TestDataSummary is an interface for managing test data.
type TestDataSummary interface {
SummaryKind() string
PrintHumanReadable() string
PrintJSON() string
}
type FrameworkOptions struct {
// Options is a struct for managing test framework options.
type Options struct {
ClientQPS float32
ClientBurst int
GroupVersion *schema.GroupVersion
}
// NewFramework makes a new framework and sets up a BeforeEach/AfterEach for
// NewDefaultFramework makes a new framework and sets up a BeforeEach/AfterEach for
// you (you can write additional before/after each functions).
func NewDefaultFramework(baseName string) *Framework {
options := FrameworkOptions{
options := Options{
ClientQPS: 20,
ClientBurst: 50,
}
return NewFramework(baseName, options, nil)
}
func NewFramework(baseName string, options FrameworkOptions, client clientset.Interface) *Framework {
// NewFramework creates a test framework.
func NewFramework(baseName string, options Options, client clientset.Interface) *Framework {
f := &Framework{
BaseName: baseName,
AddonResourceConstraints: make(map[string]ResourceConstraint),
@ -146,8 +147,8 @@ func NewFramework(baseName string, options FrameworkOptions, client clientset.In
ClientSet: client,
}
BeforeEach(f.BeforeEach)
AfterEach(f.AfterEach)
ginkgo.BeforeEach(f.BeforeEach)
ginkgo.AfterEach(f.AfterEach)
return f
}
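
A minimal sketch of the renamed Options struct in use; the suite name and QPS/burst values are assumptions.

package example

import (
	"github.com/onsi/ginkgo"
	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("example suite", func() {
	// NewFramework registers the BeforeEach/AfterEach hooks itself.
	f := framework.NewFramework("example", framework.Options{
		ClientQPS:   50,
		ClientBurst: 100,
	}, nil)

	ginkgo.It("runs with a tuned client", func() {
		_ = f.ClientSet // populated by BeforeEach
	})
})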
@ -158,9 +159,9 @@ func (f *Framework) BeforeEach() {
// https://github.com/onsi/ginkgo/issues/222
f.cleanupHandle = AddCleanupAction(f.AfterEach)
if f.ClientSet == nil {
By("Creating a kubernetes client")
ginkgo.By("Creating a kubernetes client")
config, err := LoadConfig()
testDesc := CurrentGinkgoTestDescription()
testDesc := ginkgo.CurrentGinkgoTestDescription()
if len(testDesc.ComponentTexts) > 0 {
componentTexts := strings.Join(testDesc.ComponentTexts, " ")
config.UserAgent = fmt.Sprintf(
@ -180,10 +181,6 @@ func (f *Framework) BeforeEach() {
}
f.ClientSet, err = clientset.NewForConfig(config)
ExpectNoError(err)
f.InternalClientset, err = internalclientset.NewForConfig(config)
ExpectNoError(err)
f.AggregatorClient, err = aggregatorclient.NewForConfig(config)
ExpectNoError(err)
f.DynamicClient, err = dynamic.NewForConfig(config)
ExpectNoError(err)
// node.k8s.io is based on CRD, which is served only as JSON
@ -197,7 +194,7 @@ func (f *Framework) BeforeEach() {
config.GroupVersion = &schema.GroupVersion{}
}
if config.NegotiatedSerializer == nil {
config.NegotiatedSerializer = legacyscheme.Codecs
config.NegotiatedSerializer = scheme.Codecs
}
restClient, err := rest.RESTClientFor(config)
ExpectNoError(err)
@ -213,7 +210,7 @@ func (f *Framework) BeforeEach() {
}
if !f.SkipNamespaceCreation {
By(fmt.Sprintf("Building a namespace api object, basename %s", f.BaseName))
ginkgo.By(fmt.Sprintf("Building a namespace api object, basename %s", f.BaseName))
namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
"e2e-framework": f.BaseName,
})
@ -222,11 +219,11 @@ func (f *Framework) BeforeEach() {
f.Namespace = namespace
if TestContext.VerifyServiceAccount {
By("Waiting for a default service account to be provisioned in namespace")
ginkgo.By("Waiting for a default service account to be provisioned in namespace")
err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
ExpectNoError(err)
} else {
Logf("Skipping waiting for service account")
e2elog.Logf("Skipping waiting for service account")
}
f.UniqueName = f.Namespace.GetName()
} else {
@ -254,7 +251,7 @@ func (f *Framework) BeforeEach() {
PrintVerboseLogs: false,
}, nil)
if err != nil {
Logf("Error while creating NewResourceUsageGatherer: %v", err)
e2elog.Logf("Error while creating NewResourceUsageGatherer: %v", err)
} else {
go f.gatherer.StartGatheringData()
}
@ -275,13 +272,13 @@ func (f *Framework) BeforeEach() {
if gatherMetricsAfterTest && TestContext.IncludeClusterAutoscalerMetrics {
grabber, err := metrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, !ProviderIs("kubemark"), false, false, false, TestContext.IncludeClusterAutoscalerMetrics)
if err != nil {
Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
e2elog.Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
} else {
f.clusterAutoscalerMetricsBeforeTest, err = grabber.Grab()
if err != nil {
Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
e2elog.Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
} else {
Logf("Gathered ClusterAutoscaler metrics before test")
e2elog.Logf("Gathered ClusterAutoscaler metrics before test")
}
}
@ -301,9 +298,9 @@ func (f *Framework) AfterEach() {
// Whether to delete namespace is determined by 3 factors: delete-namespace flag, delete-namespace-on-failure flag and the test result
// if delete-namespace set to false, namespace will always be preserved.
// if delete-namespace is true and delete-namespace-on-failure is false, namespace will be preserved if test failed.
if TestContext.DeleteNamespace && (TestContext.DeleteNamespaceOnFailure || !CurrentGinkgoTestDescription().Failed) {
if TestContext.DeleteNamespace && (TestContext.DeleteNamespaceOnFailure || !ginkgo.CurrentGinkgoTestDescription().Failed) {
for _, ns := range f.namespacesToDelete {
By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name))
ginkgo.By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name))
timeout := DefaultNamespaceDeletionTimeout
if f.NamespaceDeletionTimeout != 0 {
timeout = f.NamespaceDeletionTimeout
@ -312,15 +309,15 @@ func (f *Framework) AfterEach() {
if !apierrors.IsNotFound(err) {
nsDeletionErrors[ns.Name] = err
} else {
Logf("Namespace %v was already deleted", ns.Name)
e2elog.Logf("Namespace %v was already deleted", ns.Name)
}
}
}
} else {
if !TestContext.DeleteNamespace {
Logf("Found DeleteNamespace=false, skipping namespace deletion!")
e2elog.Logf("Found DeleteNamespace=false, skipping namespace deletion!")
} else {
Logf("Found DeleteNamespaceOnFailure=false and current test failed, skipping namespace deletion!")
e2elog.Logf("Found DeleteNamespaceOnFailure=false and current test failed, skipping namespace deletion!")
}
}
@ -340,7 +337,7 @@ func (f *Framework) AfterEach() {
}()
// Print events if the test failed.
if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
if ginkgo.CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
// Pass both unversioned client and versioned clientset, till we have removed all uses of the unversioned client.
if !f.SkipNamespaceCreation {
DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
@ -348,30 +345,30 @@ func (f *Framework) AfterEach() {
}
if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {
By("Collecting resource usage data")
ginkgo.By("Collecting resource usage data")
summary, resourceViolationError := f.gatherer.StopAndSummarize([]int{90, 99, 100}, f.AddonResourceConstraints)
defer ExpectNoError(resourceViolationError)
f.TestSummaries = append(f.TestSummaries, summary)
}
if TestContext.GatherLogsSizes {
By("Gathering log sizes data")
ginkgo.By("Gathering log sizes data")
close(f.logsSizeCloseChannel)
f.logsSizeWaitGroup.Wait()
f.TestSummaries = append(f.TestSummaries, f.logsSizeVerifier.GetSummary())
}
if TestContext.GatherMetricsAfterTest != "false" {
By("Gathering metrics")
ginkgo.By("Gathering metrics")
// Grab apiserver, scheduler, controller-manager metrics and (optionally) nodes' kubelet metrics.
grabMetricsFromKubelets := TestContext.GatherMetricsAfterTest != "master" && !ProviderIs("kubemark")
grabber, err := metrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, grabMetricsFromKubelets, true, true, true, TestContext.IncludeClusterAutoscalerMetrics)
if err != nil {
Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
e2elog.Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
} else {
received, err := grabber.Grab()
if err != nil {
Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
e2elog.Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
}
(*MetricsForE2E)(&received).computeClusterAutoscalerMetricsDelta(f.clusterAutoscalerMetricsBeforeTest)
f.TestSummaries = append(f.TestSummaries, (*MetricsForE2E)(&received))
@ -396,6 +393,7 @@ func (f *Framework) AfterEach() {
}
}
// CreateNamespace creates a namespace for e2e testing.
func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*v1.Namespace, error) {
createTestingNS := TestContext.CreateTestingNS
if createTestingNS == nil {
@ -407,12 +405,14 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
f.AddNamespacesToDelete(ns)
if err == nil && !f.SkipPrivilegedPSPBinding {
CreatePrivilegedPSPBinding(f, ns.Name)
createPrivilegedPSPBinding(f, ns.Name)
}
return ns, err
}
// RecordFlakeIfError records flake information if an error occurs.
// NOTE: This function is not used at any places yet, but we are in progress for https://github.com/kubernetes/kubernetes/issues/66239 which requires this. Please don't remove this.
func (f *Framework) RecordFlakeIfError(err error, optionalDescription ...interface{}) {
f.flakeReport.RecordFlakeIfError(err, optionalDescription)
}
@ -465,20 +465,20 @@ func (f *Framework) WaitForPodNoLongerRunning(podName string) error {
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a substring matcher.
func (f *Framework) TestContainerOutput(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, ContainSubstring)
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, gomega.ContainSubstring)
}
// TestContainerOutputRegexp runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a regexp matcher.
func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, MatchRegexp)
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp)
}
// Write a file using kubectl exec echo <contents> > <path> via specified container
// Because of the primitive technique we're using here, we only allow ASCII alphanumeric characters
// WriteFileViaContainer writes a file using kubectl exec echo <contents> > <path> via the specified container.
// Because of the primitive technique we're using here, we only allow ASCII alphanumeric characters.
func (f *Framework) WriteFileViaContainer(podName, containerName string, path string, contents string) error {
By("writing a file in the container")
ginkgo.By("writing a file in the container")
allowedCharacters := "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
for _, c := range contents {
if !strings.ContainsRune(allowedCharacters, c) {
@ -488,41 +488,42 @@ func (f *Framework) WriteFileViaContainer(podName, containerName string, path st
command := fmt.Sprintf("echo '%s' > '%s'", contents, path)
stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "/bin/sh", "-c", command)
if err != nil {
Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
e2elog.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
}
return err
}
// Read a file using kubectl exec cat <path>
// ReadFileViaContainer reads a file using kubectl exec cat <path>.
func (f *Framework) ReadFileViaContainer(podName, containerName string, path string) (string, error) {
By("reading a file in the container")
ginkgo.By("reading a file in the container")
stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "cat", path)
if err != nil {
Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
}
return string(stdout), err
}
// CheckFileSizeViaContainer returns the file size listing under the specified path.
func (f *Framework) CheckFileSizeViaContainer(podName, containerName, path string) (string, error) {
By("checking a file size in the container")
ginkgo.By("checking a file size in the container")
stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "ls", "-l", path)
if err != nil {
Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
}
return string(stdout), err
}
// CreateServiceForSimpleAppWithPods is a convenience wrapper to create a service and its matching pods all at once.
func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int, appName string, podSpec func(n v1.Node) v1.PodSpec, count int, block bool) (error, *v1.Service) {
var err error = nil
func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int, appName string, podSpec func(n v1.Node) v1.PodSpec, count int, block bool) (*v1.Service, error) {
var err error
theService := f.CreateServiceForSimpleApp(contPort, svcPort, appName)
f.CreatePodsPerNodeForSimpleApp(appName, podSpec, count)
if block {
err = testutils.WaitForPodsWithLabelRunning(f.ClientSet, f.Namespace.Name, labels.SelectorFromSet(labels.Set(theService.Spec.Selector)))
}
return err, theService
return theService, err
}
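
The return order flipped to the idiomatic (value, error); a hedged sketch of an updated call site, with the pod-spec function as an assumption.

package example

import (
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// makeDemoService is an illustrative caller of the updated signature.
func makeDemoService(f *framework.Framework, podSpec func(n v1.Node) v1.PodSpec) (*v1.Service, error) {
	// Was: err, svc := ...; now the service comes first and the error last.
	return f.CreateServiceForSimpleAppWithPods(80, 80, "demo", podSpec, 2, true)
}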
// CreateServiceForSimpleApp returns a service that selects/exposes pods with an app label (pass -1 for the ports if no exposure is needed).
@ -539,15 +540,14 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
portsFunc := func() []v1.ServicePort {
if contPort < 1 || svcPort < 1 {
return nil
} else {
return []v1.ServicePort{{
Protocol: v1.ProtocolTCP,
Port: int32(svcPort),
TargetPort: intstr.FromInt(contPort),
}}
}
return []v1.ServicePort{{
Protocol: v1.ProtocolTCP,
Port: int32(svcPort),
TargetPort: intstr.FromInt(contPort),
}}
}
Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
e2elog.Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(&v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "service-for-" + appName,
@ -564,7 +564,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
return service
}
// CreatePodsPerNodeForSimpleApp Creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
// CreatePodsPerNodeForSimpleApp creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
nodes := GetReadySchedulableNodesOrDie(f.ClientSet)
labels := map[string]string{
@ -573,7 +573,7 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n
for i, node := range nodes.Items {
// one per node, but no more than maxCount.
if i <= maxCount {
Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
e2elog.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf(appName+"-pod-%v", i),
@ -587,6 +587,7 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n
return labels
}
// KubeUser is a struct for managing kubernetes user info.
type KubeUser struct {
Name string `yaml:"name"`
User struct {
@ -596,6 +597,7 @@ type KubeUser struct {
} `yaml:"user"`
}
// KubeCluster is a struct for managing kubernetes cluster info.
type KubeCluster struct {
Name string `yaml:"name"`
Cluster struct {
@ -604,6 +606,7 @@ type KubeCluster struct {
} `yaml:"cluster"`
}
// KubeConfig is a struct for managing kubernetes config.
type KubeConfig struct {
Contexts []struct {
Name string `yaml:"name"`
@ -618,6 +621,7 @@ type KubeConfig struct {
Users []KubeUser `yaml:"users"`
}
// FindUser returns the user info for the specified user name.
func (kc *KubeConfig) FindUser(name string) *KubeUser {
for _, user := range kc.Users {
if user.Name == name {
@ -627,6 +631,7 @@ func (kc *KubeConfig) FindUser(name string) *KubeUser {
return nil
}
// FindCluster returns the cluster info for the specified cluster name.
func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
for _, cluster := range kc.Clusters {
if cluster.Name == name {
@ -639,19 +644,19 @@ func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
func kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ {
if numRetries > 0 {
Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
e2elog.Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
}
stdOutBytes, stdErrBytes, err := kubectlExec(namespace, podName, containerName, args...)
if err != nil {
if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") {
// Retry on "i/o timeout" errors
Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
e2elog.Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
continue
}
if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") {
// Retry on "container not found" errors
Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
e2elog.Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
time.Sleep(2 * time.Second)
continue
}
@ -659,7 +664,7 @@ func kubectlExecWithRetry(namespace string, podName, containerName string, args
return stdOutBytes, stdErrBytes, err
}
err := fmt.Errorf("Failed: kubectl exec failed %d times with \"i/o timeout\". Giving up.", maxKubectlExecRetries)
err := fmt.Errorf("Failed: kubectl exec failed %d times with \"i/o timeout\". Giving up", maxKubectlExecRetries)
return nil, nil, err
}
@ -676,20 +681,20 @@ func kubectlExec(namespace string, podName, containerName string, args ...string
cmd := KubectlCmd(cmdArgs...)
cmd.Stdout, cmd.Stderr = &stdout, &stderr
Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
e2elog.Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
err := cmd.Run()
return stdout.Bytes(), stderr.Bytes(), err
}
// Wrapper function for ginkgo describe. Adds namespacing.
// KubeDescribe is a wrapper function for ginkgo Describe. Adds namespacing.
// TODO: Support type safe tagging as well https://github.com/kubernetes/kubernetes/pull/22401.
func KubeDescribe(text string, body func()) bool {
return Describe("[k8s.io] "+text, body)
return ginkgo.Describe("[k8s.io] "+text, body)
}
// Wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier.
// ConformanceIt is a wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier.
func ConformanceIt(text string, body interface{}, timeout ...float64) bool {
return It(text+" [Conformance]", body, timeout...)
return ginkgo.It(text+" [Conformance]", body, timeout...)
}
// PodStateVerification represents a verification of pod state.
@ -713,12 +718,14 @@ type PodStateVerification struct {
PodName string
}
// ClusterVerification is a struct for a verification of cluster state.
type ClusterVerification struct {
client clientset.Interface
namespace *v1.Namespace // pointer rather than string, since ns isn't created until before each.
podState PodStateVerification
}
// NewClusterVerification creates a new cluster verification.
func (f *Framework) NewClusterVerification(namespace *v1.Namespace, filter PodStateVerification) *ClusterVerification {
return &ClusterVerification{
f.ClientSet,
@ -734,15 +741,14 @@ func passesPodNameFilter(pod v1.Pod, name string) bool {
func passesVerifyFilter(pod v1.Pod, verify func(p v1.Pod) (bool, error)) (bool, error) {
if verify == nil {
return true, nil
} else {
verified, err := verify(pod)
// If an error is returned, by definition, pod verification fails
if err != nil {
return false, err
} else {
return verified, nil
}
}
verified, err := verify(pod)
// If an error is returned, by definition, pod verification fails
if err != nil {
return false, err
}
return verified, nil
}
func passesPhasesFilter(pod v1.Pod, validPhases []v1.PodPhase) bool {
@ -782,7 +788,7 @@ func (p *PodStateVerification) filter(c clientset.Interface, namespace *v1.Names
ns := namespace.Name
pl, err := filterLabels(p.Selectors, c, ns) // Build a v1.PodList to operate against.
Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
e2elog.Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
if len(pl.Items) == 0 || err != nil {
return pl.Items, err
}
@ -797,7 +803,7 @@ ReturnPodsSoFar:
}
passesVerify, err := passesVerifyFilter(pod, p.Verify)
if err != nil {
Logf("Error detected on %v : %v !", pod.Name, err)
e2elog.Logf("Error detected on %v : %v !", pod.Name, err)
break ReturnPodsSoFar
}
if passesVerify {
@ -818,12 +824,12 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1
// Failure
if returnedErr != nil {
Logf("Cutting polling short: We got an error from the pod filtering layer.")
e2elog.Logf("Cutting polling short: We got an error from the pod filtering layer.")
// stop polling if the pod filtering returns an error. that should never happen.
// it indicates, for example, that the client is broken or something non-pod related.
return false, returnedErr
}
Logf("Found %v / %v", len(pods), atLeast)
e2elog.Logf("Found %v / %v", len(pods), atLeast)
// Success
if len(pods) >= atLeast {
@ -832,7 +838,7 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1
// Keep trying...
return false, nil
})
Logf("WaitFor completed with timeout %v. Pods found = %v out of %v", timeout, len(pods), atLeast)
e2elog.Logf("WaitFor completed with timeout %v. Pods found = %v out of %v", timeout, len(pods), atLeast)
return pods, err
}
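// Illustrative usage sketch (field names assume the full PodStateVerification
// definition, which is partially elided in this diff): wait for at least two
// running pods matching a hypothetical selector.
func exampleClusterVerification(f *Framework) {
	cv := f.NewClusterVerification(f.Namespace, PodStateVerification{
		Selectors:   map[string]string{"app": "example"},
		ValidPhases: []v1.PodPhase{v1.PodRunning},
	})
	pods, err := cv.WaitFor(2, 2*time.Minute)
	if err != nil {
		e2elog.Logf("cluster verification failed: %v", err)
		return
	}
	e2elog.Logf("verified %d running pods", len(pods))
}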
@ -855,24 +861,24 @@ func (cl *ClusterVerification) ForEach(podFunc func(v1.Pod)) error {
if len(pods) == 0 {
Failf("No pods matched the filter.")
}
Logf("ForEach: Found %v pods from the filter. Now looping through them.", len(pods))
e2elog.Logf("ForEach: Found %v pods from the filter. Now looping through them.", len(pods))
for _, p := range pods {
podFunc(p)
}
} else {
Logf("ForEach: Something went wrong when filtering pods to execute against: %v", err)
e2elog.Logf("ForEach: Something went wrong when filtering pods to execute against: %v", err)
}
return err
}
// GetLogToFileFunc is a convenience function that returns a function that has the same interface as
// Logf, but writes to a specified file.
// e2elog.Logf, but writes to a specified file.
func GetLogToFileFunc(file *os.File) func(format string, args ...interface{}) {
return func(format string, args ...interface{}) {
writer := bufio.NewWriter(file)
if _, err := fmt.Fprintf(writer, format, args...); err != nil {
Logf("Failed to write file %v with test performance data: %v", file.Name(), err)
e2elog.Logf("Failed to write file %v with test performance data: %v", file.Name(), err)
}
writer.Flush()
}

View File

@ -20,8 +20,12 @@ import (
"bufio"
"fmt"
"strings"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
// KubemarkResourceUsage is a struct for tracking the resource usage of kubemark.
type KubemarkResourceUsage struct {
Name string
MemoryWorkingSetInBytes uint64
@ -29,20 +33,21 @@ type KubemarkResourceUsage struct {
}
func getMasterUsageByPrefix(prefix string) (string, error) {
sshResult, err := SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), GetMasterHost()+":22", TestContext.Provider)
sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), GetMasterHost()+":22", TestContext.Provider)
if err != nil {
return "", err
}
return sshResult.Stdout, nil
}
// GetKubemarkMasterComponentsResourceUsage returns the resource usage of the kubemark master components, i.e. the CPU and memory usage of each master pod, keyed by pod name.
// TODO: figure out how to move this to kubemark directory (need to factor test SSH out of e2e framework)
func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsage {
result := make(map[string]*KubemarkResourceUsage)
// Get kubernetes component resource usage
sshResult, err := getMasterUsageByPrefix("kube")
if err != nil {
Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
e2elog.Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
return nil
}
scanner := bufio.NewScanner(strings.NewReader(sshResult))
@ -60,7 +65,7 @@ func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsag
// Get etcd resource usage
sshResult, err = getMasterUsageByPrefix("bin/etcd")
if err != nil {
Logf("Error when trying to SSH to master machine. Skipping probe")
e2elog.Logf("Error when trying to SSH to master machine. Skipping probe")
return nil
}
scanner = bufio.NewScanner(strings.NewReader(sshResult))

View File

@ -23,6 +23,8 @@ import (
"os/exec"
"path/filepath"
"strings"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)
// TODO: These should really just use the GCE API client library or at least use
@ -46,9 +48,9 @@ func lookupClusterImageSources() (string, string, error) {
str = strings.Replace(str, ";", "\n", -1)
lines := strings.Split(str, "\n")
if err != nil {
Logf("lookupDiskImageSources: gcloud error with [%#v]; err:%v", argv, err)
e2elog.Logf("lookupDiskImageSources: gcloud error with [%#v]; err:%v", argv, err)
for _, l := range lines {
Logf(" > %s", l)
e2elog.Logf(" > %s", l)
}
}
return lines, err
@ -108,14 +110,15 @@ func lookupClusterImageSources() (string, string, error) {
return masterImg, nodeImg, nil
}
// LogClusterImageSources writes out cluster image sources.
func LogClusterImageSources() {
masterImg, nodeImg, err := lookupClusterImageSources()
if err != nil {
Logf("Cluster image sources lookup failed: %v\n", err)
e2elog.Logf("Cluster image sources lookup failed: %v\n", err)
return
}
Logf("cluster-master-image: %s", masterImg)
Logf("cluster-node-image: %s", nodeImg)
e2elog.Logf("cluster-master-image: %s", masterImg)
e2elog.Logf("cluster-node-image: %s", nodeImg)
images := map[string]string{
"master_os_image": masterImg,
@ -125,10 +128,11 @@ func LogClusterImageSources() {
outputBytes, _ := json.MarshalIndent(images, "", " ")
filePath := filepath.Join(TestContext.ReportDir, "images.json")
if err := ioutil.WriteFile(filePath, outputBytes, 0644); err != nil {
Logf("cluster images sources, could not write to %q: %v", filePath, err)
e2elog.Logf("cluster images sources, could not write to %q: %v", filePath, err)
}
}
// CreateManagedInstanceGroup creates a Compute Engine managed instance group.
func CreateManagedInstanceGroup(size int64, zone, template string) error {
// TODO(verult): make this hit the compute API directly instead of
// shelling out to gcloud.
@ -145,6 +149,7 @@ func CreateManagedInstanceGroup(size int64, zone, template string) error {
return nil
}
// GetManagedInstanceGroupTemplateName returns the name of the instance template used by the Google Compute Engine managed instance group in the given zone.
func GetManagedInstanceGroupTemplateName(zone string) (string, error) {
// TODO(verult): make this hit the compute API directly instead of
// shelling out to gcloud. Use InstanceGroupManager to get Instance Template name.
@ -167,6 +172,7 @@ func GetManagedInstanceGroupTemplateName(zone string) (string, error) {
return templateName, nil
}
// DeleteManagedInstanceGroup deletes a Google Compute Engine managed instance group.
func DeleteManagedInstanceGroup(zone string) error {
// TODO(verult): make this hit the compute API directly instead of
// shelling out to gcloud.

View File

@ -1,84 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/klog"
)
const (
// NVIDIAGPUResourceName is the extended name of the GPU resource since v1.8
// this uses the device plugin mechanism
NVIDIAGPUResourceName = "nvidia.com/gpu"
// TODO: Parametrize it by making it a feature in TestFramework.
// so we can override the daemonset in other setups (non COS).
// GPUDevicePluginDSYAML is the official Google Device Plugin Daemonset NVIDIA GPU manifest for GKE
GPUDevicePluginDSYAML = "https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml"
)
// TODO make this generic and not linked to COS only
// NumberOfNVIDIAGPUs returns the number of GPUs advertised by a node.
// This is based on the Device Plugin system and expected to run on a COS-based node
// after the NVIDIA drivers were installed.
func NumberOfNVIDIAGPUs(node *v1.Node) int64 {
val, ok := node.Status.Capacity[NVIDIAGPUResourceName]
if !ok {
return 0
}
return val.Value()
}
// NVIDIADevicePlugin returns the official Google Device Plugin pod for NVIDIA GPU in GKE
func NVIDIADevicePlugin() *v1.Pod {
ds, err := DsFromManifest(GPUDevicePluginDSYAML)
ExpectNoError(err)
p := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "device-plugin-nvidia-gpu-" + string(uuid.NewUUID()),
Namespace: metav1.NamespaceSystem,
},
Spec: ds.Spec.Template.Spec,
}
// Remove node affinity
p.Spec.Affinity = nil
return p
}
func GetGPUDevicePluginImage() string {
ds, err := DsFromManifest(GPUDevicePluginDSYAML)
if err != nil {
klog.Errorf("Failed to parse the device plugin image: %v", err)
return ""
}
if ds == nil {
klog.Errorf("Failed to parse the device plugin image: the extracted DaemonSet is nil")
return ""
}
if len(ds.Spec.Template.Spec.Containers) < 1 {
klog.Errorf("Failed to parse the device plugin image: cannot extract the container from YAML")
return ""
}
return ds.Spec.Template.Spec.Containers[0].Image
}

View File

@ -1,316 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"time"
batch "k8s.io/api/batch/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
jobutil "k8s.io/kubernetes/pkg/controller/job"
)
const (
// How long to wait for a job to finish.
JobTimeout = 15 * time.Minute
// Job selector name
JobSelectorKey = "job"
)
// NewTestJob returns a Job which does one of several testing behaviors. notTerminate starts a Job that will run
// effectively forever. fail starts a Job that will fail immediately. succeed starts a Job that will succeed
// immediately. randomlySucceedOrFail starts a Job that will succeed or fail randomly. failOnce fails the Job the
// first time it is run and succeeds subsequently. name is the Name of the Job. RestartPolicy indicates the restart
// policy of the containers in which the Pod is running. Parallelism is the Job's parallelism, and completions is the
// Job's required number of completions.
func NewTestJob(behavior, name string, rPol v1.RestartPolicy, parallelism, completions int32, activeDeadlineSeconds *int64, backoffLimit int32) *batch.Job {
job := &batch.Job{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
TypeMeta: metav1.TypeMeta{
Kind: "Job",
},
Spec: batch.JobSpec{
ActiveDeadlineSeconds: activeDeadlineSeconds,
Parallelism: &parallelism,
Completions: &completions,
BackoffLimit: &backoffLimit,
ManualSelector: newBool(false),
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{JobSelectorKey: name},
},
Spec: v1.PodSpec{
RestartPolicy: rPol,
Volumes: []v1.Volume{
{
Name: "data",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
Containers: []v1.Container{
{
Name: "c",
Image: BusyBoxImage,
Command: []string{},
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",
Name: "data",
},
},
},
},
},
},
},
}
switch behavior {
case "notTerminate":
job.Spec.Template.Spec.Containers[0].Command = []string{"sleep", "1000000"}
case "fail":
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 1"}
case "succeed":
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 0"}
case "randomlySucceedOrFail":
// Bash's $RANDOM generates pseudorandom int in range 0 - 32767.
// Dividing by 16384 gives roughly 50/50 chance of success.
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit $(( $RANDOM / 16384 ))"}
case "failOnce":
// Fail the first time the container of the pod is run, and
// succeed the second time. Checks for file on emptydir.
// If present, succeed. If not, create but fail.
// Note that this cannot be used with RestartNever because
// it always fails the first time for a pod.
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "if [[ -r /data/foo ]] ; then exit 0 ; else touch /data/foo ; exit 1 ; fi"}
}
return job
}
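// Illustrative usage sketch (hypothetical names and sizes): create a Job that
// exits 0 immediately and wait until all three completions succeed.
func exampleJobLifecycle(c clientset.Interface, ns string) error {
	job := NewTestJob("succeed", "example-job", v1.RestartPolicyOnFailure, 1, 3, nil, 6)
	job, err := CreateJob(c, ns, job)
	if err != nil {
		return err
	}
	return WaitForJobComplete(c, ns, job.Name, 3)
}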
// GetJob uses c to get the Job in namespace ns named name. If the returned error is nil, the returned Job is valid.
func GetJob(c clientset.Interface, ns, name string) (*batch.Job, error) {
return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{})
}
// CreateJob uses c to create job in namespace ns. If the returned error is nil, the returned Job is valid and has
// been created.
func CreateJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
return c.BatchV1().Jobs(ns).Create(job)
}
// UpdateJob uses c to update a job in namespace ns. If the returned error is nil, the returned Job is valid and has
// been updated.
func UpdateJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
return c.BatchV1().Jobs(ns).Update(job)
}
// UpdateJobFunc updates the job object. It retries if there is a conflict, and returns an error if
// any other error occurs. name is the job name, updateFn is the function updating the
// job object.
func UpdateJobFunc(c clientset.Interface, ns, name string, updateFn func(job *batch.Job)) {
ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
job, err := GetJob(c, ns, name)
if err != nil {
return false, fmt.Errorf("failed to get pod %q: %v", name, err)
}
updateFn(job)
_, err = UpdateJob(c, ns, job)
if err == nil {
Logf("Successfully updated job %q", name)
return true, nil
}
if errors.IsConflict(err) {
Logf("Conflicting update to job %q, re-get and re-update: %v", name, err)
return false, nil
}
return false, fmt.Errorf("failed to update job %q: %v", name, err)
}))
}
// DeleteJob uses c to delete the Job named name in namespace ns. If the returned error is nil, the Job has been
// deleted.
func DeleteJob(c clientset.Interface, ns, name string) error {
return c.BatchV1().Jobs(ns).Delete(name, nil)
}
// GetJobPods returns a list of Pods belonging to a Job.
func GetJobPods(c clientset.Interface, ns, jobName string) (*v1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{JobSelectorKey: jobName}))
options := metav1.ListOptions{LabelSelector: label.String()}
return c.CoreV1().Pods(ns).List(options)
}
// WaitForAllJobPodsRunning waits for all pods for the Job named jobName in namespace ns to become Running. Only use
// when pods will run for a long time, or it will be racy.
func WaitForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error {
return wait.Poll(Poll, JobTimeout, func() (bool, error) {
pods, err := GetJobPods(c, ns, jobName)
if err != nil {
return false, err
}
count := int32(0)
for _, p := range pods.Items {
if p.Status.Phase == v1.PodRunning {
count++
}
}
return count == parallelism, nil
})
}
// WaitForJobComplete uses c to wait for the Job jobName in namespace ns to reach the given number of completions.
func WaitForJobComplete(c clientset.Interface, ns, jobName string, completions int32) error {
return wait.Poll(Poll, JobTimeout, func() (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
return curr.Status.Succeeded == completions, nil
})
}
// WaitForJobFinish uses c to wait for the Job jobName in namespace ns to finish (either Failed or Complete).
func WaitForJobFinish(c clientset.Interface, ns, jobName string) error {
return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
return jobutil.IsJobFinished(curr), nil
})
}
// WaitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail.
func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error {
return wait.Poll(Poll, timeout, func() (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, c := range curr.Status.Conditions {
if c.Type == batch.JobFailed && c.Status == v1.ConditionTrue {
if reason == "" || reason == c.Reason {
return true, nil
}
}
}
return false, nil
})
}
// WaitForJobGone uses c to wait for up to timeout for the Job named jobName in namespace ns to be removed.
func WaitForJobGone(c clientset.Interface, ns, jobName string, timeout time.Duration) error {
return wait.Poll(Poll, timeout, func() (bool, error) {
_, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if errors.IsNotFound(err) {
return true, nil
}
return false, err
})
}
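// Illustrative usage sketch: delete a Job and block until the API server no
// longer returns it (names and timeout hypothetical).
func exampleJobCleanup(c clientset.Interface, ns, name string) error {
	if err := DeleteJob(c, ns, name); err != nil {
		return err
	}
	return WaitForJobGone(c, ns, name, 2*time.Minute)
}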
// CheckForAllJobPodsRunning uses c to check if the Job named jobName in ns is running. If the returned error is
// nil, the returned bool is true if the Job is running.
func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) (bool, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{JobSelectorKey: jobName}))
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(options)
if err != nil {
return false, err
}
count := int32(0)
for _, p := range pods.Items {
if p.Status.Phase == v1.PodRunning {
count++
}
}
return count == parallelism, nil
}
// WaitForAllJobPodsGone waits for all pods for the Job named jobName in namespace ns
// to be deleted.
func WaitForAllJobPodsGone(c clientset.Interface, ns, jobName string) error {
return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
pods, err := GetJobPods(c, ns, jobName)
if err != nil {
return false, err
}
return len(pods.Items) == 0, nil
})
}
func newBool(val bool) *bool {
p := new(bool)
*p = val
return p
}
type updateJobFunc func(*batch.Job)
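// UpdateJobWithRetries modifies the Job named name in the given namespace via applyUpdate, retrying on
// update conflicts until JobTimeout elapses.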
func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateJobFunc) (job *batch.Job, err error) {
jobs := c.BatchV1().Jobs(namespace)
var updateErr error
pollErr := wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
if job, err = jobs.Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(job)
if job, err = jobs.Update(job); err == nil {
Logf("Updating job %s", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided update to job %q: %v", name, updateErr)
}
return job, pollErr
}
// WaitForJobDeleting uses c to wait for the Job jobName in namespace ns to have
// a non-nil deletionTimestamp (i.e. being deleted).
func WaitForJobDeleting(c clientset.Interface, ns, jobName string) error {
return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
return curr.ObjectMeta.DeletionTimestamp != nil, nil
})
}
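// JobFinishTime returns the finish time of a finished Job, or the zero time
// if no Complete or Failed condition is recorded.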
func JobFinishTime(finishedJob *batch.Job) metav1.Time {
var finishTime metav1.Time
for _, c := range finishedJob.Status.Conditions {
if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == v1.ConditionTrue {
return c.LastTransitionTime
}
}
return finishTime
}
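// Illustrative usage sketch: combined with JobFinishTime, a test can compute
// how long a finished Job ran, assuming Status.StartTime was set.
func exampleJobDuration(finishedJob *batch.Job) time.Duration {
	finish := JobFinishTime(finishedJob)
	return finish.Time.Sub(finishedJob.Status.StartTime.Time)
}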

View File

@ -38,12 +38,13 @@ import (
dockermetrics "k8s.io/kubernetes/pkg/kubelet/dockershim/metrics"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/master/ports"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/metrics"
"github.com/prometheus/common/model"
)
// KubeletMetric stores metrics scraped from the kubelet server's /metric endpoint.
// KubeletLatencyMetric stores metrics scraped from the kubelet server's /metrics endpoint.
// TODO: Get some more structure around the metrics and this type
type KubeletLatencyMetric struct {
// eg: list, info, create
@ -55,7 +56,7 @@ type KubeletLatencyMetric struct {
Latency time.Duration
}
// KubeletMetricByLatency implements sort.Interface for []KubeletMetric based on
// KubeletLatencyMetrics implements sort.Interface for []KubeletLatencyMetric based on
// the latency field.
type KubeletLatencyMetrics []KubeletLatencyMetric
@ -159,6 +160,7 @@ type RuntimeOperationErrorRate struct {
TimeoutRate float64
}
// NewRuntimeOperationMonitor returns a new RuntimeOperationMonitor.
func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor {
m := &RuntimeOperationMonitor{
client: c,
@ -182,7 +184,7 @@ func (m *RuntimeOperationMonitor) GetRuntimeOperationErrorRate() map[string]Node
for node := range m.nodesRuntimeOps {
nodeResult, err := getNodeRuntimeOperationErrorRate(m.client, node)
if err != nil {
Logf("GetRuntimeOperationErrorRate: unable to get kubelet metrics from node %q: %v", node, err)
e2elog.Logf("GetRuntimeOperationErrorRate: unable to get kubelet metrics from node %q: %v", node, err)
continue
}
m.nodesRuntimeOps[node] = nodeResult
@ -198,7 +200,7 @@ func (m *RuntimeOperationMonitor) GetLatestRuntimeOperationErrorRate() map[strin
oldNodeResult := m.nodesRuntimeOps[node]
curNodeResult, err := getNodeRuntimeOperationErrorRate(m.client, node)
if err != nil {
Logf("GetLatestRuntimeOperationErrorRate: unable to get kubelet metrics from node %q: %v", node, err)
e2elog.Logf("GetLatestRuntimeOperationErrorRate: unable to get kubelet metrics from node %q: %v", node, err)
continue
}
for op, cur := range curNodeResult {
@ -275,7 +277,7 @@ func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration
for _, m := range latencyMetrics {
if m.Latency > threshold {
badMetrics = append(badMetrics, m)
Logf("%+v", m)
e2elog.Logf("%+v", m)
}
}
return badMetrics, nil
@ -433,7 +435,7 @@ const (
rootContainerName = "/"
)
// A list of containers for which we want to collect resource usage.
// TargetContainers returns a list of containers for which we want to collect resource usage.
func TargetContainers() []string {
return []string{
rootContainerName,
@ -442,6 +444,7 @@ func TargetContainers() []string {
}
}
// ContainerResourceUsage is a structure for gathering container resource usage.
type ContainerResourceUsage struct {
Name string
Timestamp time.Time
@ -457,7 +460,10 @@ func (r *ContainerResourceUsage) isStrictlyGreaterThan(rhs *ContainerResourceUsa
return r.CPUUsageInCores > rhs.CPUUsageInCores && r.MemoryWorkingSetInBytes > rhs.MemoryWorkingSetInBytes
}
// ResourceUsagePerContainer is a map of ContainerResourceUsage, keyed by container name.
type ResourceUsagePerContainer map[string]*ContainerResourceUsage
// ResourceUsagePerNode is a map of ResourceUsagePerContainer, keyed by node name.
type ResourceUsagePerNode map[string]ResourceUsagePerContainer
func formatResourceUsageStats(nodeName string, containerStats ResourceUsagePerContainer) string {
@ -491,6 +497,7 @@ type usageDataPerContainer struct {
memWorkSetData []uint64
}
// GetKubeletHeapStats returns stats of kubelet heap.
func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error) {
client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap", ports.KubeletPort)
if err != nil {
@ -507,16 +514,17 @@ func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error)
return strings.Join(lines[len(lines)-numLines:], "\n"), nil
}
// PrintAllKubeletPods outputs status of all kubelet pods into log.
func PrintAllKubeletPods(c clientset.Interface, nodeName string) {
podList, err := GetKubeletPods(c, nodeName)
if err != nil {
Logf("Unable to retrieve kubelet pods for node %v: %v", nodeName, err)
e2elog.Logf("Unable to retrieve kubelet pods for node %v: %v", nodeName, err)
return
}
for _, p := range podList.Items {
Logf("%v from %v started at %v (%d container statuses recorded)", p.Name, p.Namespace, p.Status.StartTime, len(p.Status.ContainerStatuses))
e2elog.Logf("%v from %v started at %v (%d container statuses recorded)", p.Name, p.Namespace, p.Status.StartTime, len(p.Status.ContainerStatuses))
for _, c := range p.Status.ContainerStatuses {
Logf("\tContainer %v ready: %v, restart count %v",
e2elog.Logf("\tContainer %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
}
@ -576,7 +584,7 @@ func (r *resourceCollector) Stop() {
func (r *resourceCollector) collectStats(oldStatsMap map[string]*stats.ContainerStats) {
summary, err := getNodeStatsSummary(r.client, r.node)
if err != nil {
Logf("Error getting node stats summary on %q, err: %v", r.node, err)
e2elog.Logf("Error getting node stats summary on %q, err: %v", r.node, err)
return
}
cStatsMap := getSystemContainerStats(summary)
@ -585,7 +593,7 @@ func (r *resourceCollector) collectStats(oldStatsMap map[string]*stats.Container
for _, name := range r.containers {
cStats, ok := cStatsMap[name]
if !ok {
Logf("Missing info/stats for container %q on node %q", name, r.node)
e2elog.Logf("Missing info/stats for container %q on node %q", name, r.node)
return
}
@ -661,6 +669,7 @@ type ResourceMonitor struct {
collectors map[string]*resourceCollector
}
// NewResourceMonitor returns a new ResourceMonitor.
func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingInterval time.Duration) *ResourceMonitor {
return &ResourceMonitor{
containers: containerNames,
@ -669,6 +678,7 @@ func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingI
}
}
// Start starts collectors.
func (r *ResourceMonitor) Start() {
// It should be OK to monitor unschedulable Nodes
nodes, err := r.client.CoreV1().Nodes().List(metav1.ListOptions{})
@ -683,26 +693,31 @@ func (r *ResourceMonitor) Start() {
}
}
// Stop stops collectors.
func (r *ResourceMonitor) Stop() {
for _, collector := range r.collectors {
collector.Stop()
}
}
// Reset resets collectors.
func (r *ResourceMonitor) Reset() {
for _, collector := range r.collectors {
collector.Reset()
}
}
// LogLatest outputs the latest resource usage into log.
func (r *ResourceMonitor) LogLatest() {
summary, err := r.GetLatest()
if err != nil {
Logf("%v", err)
e2elog.Logf("%v", err)
}
Logf("%s", r.FormatResourceUsage(summary))
e2elog.Logf("%s", r.FormatResourceUsage(summary))
}
// FormatResourceUsage returns the formatted string for LogLatest().
// TODO(oomichi): This can be made to local function after making test/e2e/node/kubelet_perf.go use LogLatest directly instead.
func (r *ResourceMonitor) FormatResourceUsage(s ResourceUsagePerNode) string {
summary := []string{}
for node, usage := range s {
@ -711,6 +726,7 @@ func (r *ResourceMonitor) FormatResourceUsage(s ResourceUsagePerNode) string {
return strings.Join(summary, "\n")
}
// GetLatest returns the latest resource usage.
func (r *ResourceMonitor) GetLatest() (ResourceUsagePerNode, error) {
result := make(ResourceUsagePerNode)
errs := []error{}
@ -725,6 +741,7 @@ func (r *ResourceMonitor) GetLatest() (ResourceUsagePerNode, error) {
return result, utilerrors.NewAggregate(errs)
}
// GetMasterNodeLatest returns the latest resource usage of master and node.
func (r *ResourceMonitor) GetMasterNodeLatest(usagePerNode ResourceUsagePerNode) ResourceUsagePerNode {
result := make(ResourceUsagePerNode)
var masterUsage ResourceUsagePerContainer
@ -767,6 +784,7 @@ type ContainersCPUSummary map[string]map[float64]float64
// ContainersCPUSummary map.
type NodesCPUSummary map[string]ContainersCPUSummary
// FormatCPUSummary returns the string of human-readable CPU summary from the specified summary data.
func (r *ResourceMonitor) FormatCPUSummary(summary NodesCPUSummary) string {
// Example output for a node (the percentiles may differ):
// CPU usage of containers on node "e2e-test-foo-node-0vj7":
@ -804,11 +822,13 @@ func (r *ResourceMonitor) FormatCPUSummary(summary NodesCPUSummary) string {
return strings.Join(summaryStrings, "\n")
}
// LogCPUSummary outputs summary of CPU into log.
func (r *ResourceMonitor) LogCPUSummary() {
summary := r.GetCPUSummary()
Logf("%s", r.FormatCPUSummary(summary))
e2elog.Logf("%s", r.FormatCPUSummary(summary))
}
// GetCPUSummary returns summary of CPU.
func (r *ResourceMonitor) GetCPUSummary() NodesCPUSummary {
result := make(NodesCPUSummary)
for nodeName, collector := range r.collectors {
@ -821,6 +841,7 @@ func (r *ResourceMonitor) GetCPUSummary() NodesCPUSummary {
return result
}
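// Illustrative usage sketch (hypothetical interval): start the monitor, let
// the collectors poll for a while, then log usage and CPU percentiles.
func exampleResourceMonitor(c clientset.Interface) {
	rm := NewResourceMonitor(c, TargetContainers(), 10*time.Second)
	rm.Start()
	defer rm.Stop()
	time.Sleep(time.Minute) // the test workload would run here
	rm.LogLatest()
	rm.LogCPUSummary()
}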
// GetMasterNodeCPUSummary returns summary of master node CPUs.
func (r *ResourceMonitor) GetMasterNodeCPUSummary(summaryPerNode NodesCPUSummary) NodesCPUSummary {
result := make(NodesCPUSummary)
var masterSummary ContainersCPUSummary

View File

@ -0,0 +1,37 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package log
import (
"fmt"
"time"
"github.com/onsi/ginkgo"
)
func nowStamp() string {
return time.Now().Format(time.StampMilli)
}
func log(level string, format string, args ...interface{}) {
fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
}
// Logf logs the info.
func Logf(format string, args ...interface{}) {
log("INFO", format, args...)
}
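// Illustrative usage sketch: callers import this package with the e2elog
// alias used throughout the framework and log through Logf, e.g.
//
//	import e2elog "k8s.io/kubernetes/test/e2e/framework/log"
//
//	e2elog.Logf("created %d pods in %v", count, elapsed)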

View File

@ -26,6 +26,8 @@ import (
"time"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
const (
@ -75,16 +77,20 @@ type LogsSizeVerifier struct {
workers []*LogSizeGatherer
}
// SingleLogSummary is a structure for handling average generation rate and number of probes.
type SingleLogSummary struct {
AverageGenerationRate int
NumberOfProbes int
}
// LogSizeDataTimeseries is a map of timestamped sizes.
type LogSizeDataTimeseries map[string]map[string][]TimestampedSize
// LogsSizeDataSummary is a map of log summaries:
// node -> file -> data
type LogsSizeDataSummary map[string]map[string]SingleLogSummary
// PrintHumanReadable returns a human-readable string of the log size data summary.
// TODO: make sure that we don't need locking here
func (s *LogsSizeDataSummary) PrintHumanReadable() string {
buf := &bytes.Buffer{}
@ -100,14 +106,17 @@ func (s *LogsSizeDataSummary) PrintHumanReadable() string {
return buf.String()
}
// PrintJSON returns the summary of log size data in JSON format.
func (s *LogsSizeDataSummary) PrintJSON() string {
return PrettyPrintJSON(*s)
}
// SummaryKind returns the kind of the log size data summary.
func (s *LogsSizeDataSummary) SummaryKind() string {
return "LogSizeSummary"
}
// LogsSizeData is a structure for handling timeseries of log size data and a lock.
type LogsSizeData struct {
data LogSizeDataTimeseries
lock sync.Mutex
@ -133,7 +142,7 @@ func prepareData(masterAddress string, nodeAddresses []string) *LogsSizeData {
}
}
func (d *LogsSizeData) AddNewData(ip, path string, timestamp time.Time, size int) {
func (d *LogsSizeData) addNewData(ip, path string, timestamp time.Time, size int) {
d.lock.Lock()
defer d.lock.Unlock()
d.data[ip][path] = append(
@ -147,7 +156,7 @@ func (d *LogsSizeData) AddNewData(ip, path string, timestamp time.Time, size int
// NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed
func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
nodeAddresses, err := NodeSSHHosts(c)
nodeAddresses, err := e2essh.NodeSSHHosts(c)
ExpectNoError(err)
masterAddress := GetMasterHost() + ":22"
@ -197,26 +206,27 @@ func (s *LogsSizeVerifier) GetSummary() *LogsSizeDataSummary {
}
// Run starts log size gathering. It starts a goroutine for every worker and then blocks until stopChannel is closed.
func (v *LogsSizeVerifier) Run() {
v.workChannel <- WorkItem{
ip: v.masterAddress,
func (s *LogsSizeVerifier) Run() {
s.workChannel <- WorkItem{
ip: s.masterAddress,
paths: masterLogsToCheck,
backoffMultiplier: 1,
}
for _, node := range v.nodeAddresses {
v.workChannel <- WorkItem{
for _, node := range s.nodeAddresses {
s.workChannel <- WorkItem{
ip: node,
paths: nodeLogsToCheck,
backoffMultiplier: 1,
}
}
for _, worker := range v.workers {
for _, worker := range s.workers {
go worker.Run()
}
<-v.stopChannel
v.wg.Wait()
<-s.stopChannel
s.wg.Wait()
}
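// Illustrative usage sketch: run the verifier in the background while the
// test executes, then close the stop channel and report the summary.
func exampleLogsVerifier(c clientset.Interface) {
	stopCh := make(chan bool)
	verifier := NewLogsVerifier(c, stopCh)
	go verifier.Run()
	time.Sleep(5 * time.Minute) // the test workload would run here
	close(stopCh)
	e2elog.Logf("%s", verifier.GetSummary().PrintHumanReadable())
}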
// Run starts log size gathering.
func (g *LogSizeGatherer) Run() {
for g.Work() {
}
@ -242,16 +252,16 @@ func (g *LogSizeGatherer) Work() bool {
return false
case workItem = <-g.workChannel:
}
sshResult, err := SSH(
sshResult, err := e2essh.SSH(
fmt.Sprintf("ls -l %v | awk '{print $9, $5}' | tr '\n' ' '", strings.Join(workItem.paths, " ")),
workItem.ip,
TestContext.Provider,
)
if err != nil {
Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
e2elog.Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
// In case of repeated error give up.
if workItem.backoffMultiplier >= 128 {
Logf("Failed to ssh to a node %v multiple times in a row. Giving up.", workItem.ip)
e2elog.Logf("Failed to ssh to a node %v multiple times in a row. Giving up.", workItem.ip)
g.wg.Done()
return false
}
@ -267,10 +277,10 @@ func (g *LogSizeGatherer) Work() bool {
path := results[i]
size, err := strconv.Atoi(results[i+1])
if err != nil {
Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
e2elog.Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
continue
}
g.data.AddNewData(workItem.ip, path, now, size)
g.data.addNewData(workItem.ip, path, now, size)
}
go g.pushWorkItem(workItem)
return true

View File

@ -16,7 +16,7 @@ limitations under the License.
package metrics
// ClusterAutoscalerMetrics is metrics for cluster autoscaller
// ClusterAutoscalerMetrics is metrics for cluster autoscaler
type ClusterAutoscalerMetrics Metrics
// Equal returns true if all metrics are the same as the arguments.

View File

@ -36,7 +36,9 @@ import (
"k8s.io/kubernetes/pkg/master/ports"
schedulermetric "k8s.io/kubernetes/pkg/scheduler/metrics"
"k8s.io/kubernetes/pkg/util/system"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/metrics"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
@ -65,31 +67,28 @@ const (
caFunctionMetricLabel = "function"
)
// MetricsForE2E is a metrics collection of components.
type MetricsForE2E metrics.Collection
func (m *MetricsForE2E) filterMetrics() {
interestingAPIServerMetrics := make(metrics.APIServerMetrics)
for _, metric := range InterestingAPIServerMetrics {
interestingAPIServerMetrics[metric] = (*m).APIServerMetrics[metric]
apiServerMetrics := make(metrics.APIServerMetrics)
for _, metric := range interestingAPIServerMetrics {
apiServerMetrics[metric] = (*m).APIServerMetrics[metric]
}
interestingControllerManagerMetrics := make(metrics.ControllerManagerMetrics)
for _, metric := range InterestingControllerManagerMetrics {
interestingControllerManagerMetrics[metric] = (*m).ControllerManagerMetrics[metric]
controllerManagerMetrics := make(metrics.ControllerManagerMetrics)
for _, metric := range interestingControllerManagerMetrics {
controllerManagerMetrics[metric] = (*m).ControllerManagerMetrics[metric]
}
interestingClusterAutoscalerMetrics := make(metrics.ClusterAutoscalerMetrics)
for _, metric := range InterestingClusterAutoscalerMetrics {
interestingClusterAutoscalerMetrics[metric] = (*m).ClusterAutoscalerMetrics[metric]
}
interestingKubeletMetrics := make(map[string]metrics.KubeletMetrics)
kubeletMetrics := make(map[string]metrics.KubeletMetrics)
for kubelet, grabbed := range (*m).KubeletMetrics {
interestingKubeletMetrics[kubelet] = make(metrics.KubeletMetrics)
for _, metric := range InterestingKubeletMetrics {
interestingKubeletMetrics[kubelet][metric] = grabbed[metric]
kubeletMetrics[kubelet] = make(metrics.KubeletMetrics)
for _, metric := range interestingKubeletMetrics {
kubeletMetrics[kubelet][metric] = grabbed[metric]
}
}
(*m).APIServerMetrics = interestingAPIServerMetrics
(*m).ControllerManagerMetrics = interestingControllerManagerMetrics
(*m).KubeletMetrics = interestingKubeletMetrics
(*m).APIServerMetrics = apiServerMetrics
(*m).ControllerManagerMetrics = controllerManagerMetrics
(*m).KubeletMetrics = kubeletMetrics
}
func printSample(sample *model.Sample) string {
@ -112,21 +111,22 @@ func printSample(sample *model.Sample) string {
return fmt.Sprintf("[%v] = %v", strings.Join(buf, ","), sample.Value)
}
// PrintHumanReadable returns e2e metrics in a human-readable format.
func (m *MetricsForE2E) PrintHumanReadable() string {
buf := bytes.Buffer{}
for _, interestingMetric := range InterestingAPIServerMetrics {
for _, interestingMetric := range interestingAPIServerMetrics {
buf.WriteString(fmt.Sprintf("For %v:\n", interestingMetric))
for _, sample := range (*m).APIServerMetrics[interestingMetric] {
buf.WriteString(fmt.Sprintf("\t%v\n", printSample(sample)))
}
}
for _, interestingMetric := range InterestingControllerManagerMetrics {
for _, interestingMetric := range interestingControllerManagerMetrics {
buf.WriteString(fmt.Sprintf("For %v:\n", interestingMetric))
for _, sample := range (*m).ControllerManagerMetrics[interestingMetric] {
buf.WriteString(fmt.Sprintf("\t%v\n", printSample(sample)))
}
}
for _, interestingMetric := range InterestingClusterAutoscalerMetrics {
for _, interestingMetric := range interestingClusterAutoscalerMetrics {
buf.WriteString(fmt.Sprintf("For %v:\n", interestingMetric))
for _, sample := range (*m).ClusterAutoscalerMetrics[interestingMetric] {
buf.WriteString(fmt.Sprintf("\t%v\n", printSample(sample)))
@ -134,7 +134,7 @@ func (m *MetricsForE2E) PrintHumanReadable() string {
}
for kubelet, grabbed := range (*m).KubeletMetrics {
buf.WriteString(fmt.Sprintf("For %v:\n", kubelet))
for _, interestingMetric := range InterestingKubeletMetrics {
for _, interestingMetric := range interestingKubeletMetrics {
buf.WriteString(fmt.Sprintf("\tFor %v:\n", interestingMetric))
for _, sample := range grabbed[interestingMetric] {
buf.WriteString(fmt.Sprintf("\t\t%v\n", printSample(sample)))
@ -144,18 +144,20 @@ func (m *MetricsForE2E) PrintHumanReadable() string {
return buf.String()
}
// PrintJSON returns e2e metrics in JSON format.
func (m *MetricsForE2E) PrintJSON() string {
m.filterMetrics()
return PrettyPrintJSON(m)
}
// SummaryKind returns the kind of the e2e metrics summary.
func (m *MetricsForE2E) SummaryKind() string {
return "MetricsForE2E"
}
var SchedulingLatencyMetricName = model.LabelValue(schedulermetric.SchedulerSubsystem + "_" + schedulermetric.SchedulingLatencyName)
var schedulingLatencyMetricName = model.LabelValue(schedulermetric.SchedulerSubsystem + "_" + schedulermetric.SchedulingLatencyName)
var InterestingAPIServerMetrics = []string{
var interestingAPIServerMetrics = []string{
"apiserver_request_total",
// TODO(krzysied): apiserver_request_latencies_summary is a deprecated metric.
// It should be replaced with new metric.
@ -163,7 +165,7 @@ var InterestingAPIServerMetrics = []string{
"apiserver_init_events_total",
}
var InterestingControllerManagerMetrics = []string{
var interestingControllerManagerMetrics = []string{
"garbage_collector_attempt_to_delete_queue_latency",
"garbage_collector_attempt_to_delete_work_duration",
"garbage_collector_attempt_to_orphan_queue_latency",
@ -183,24 +185,21 @@ var InterestingControllerManagerMetrics = []string{
"namespace_work_duration_count",
}
var InterestingKubeletMetrics = []string{
"kubelet_container_manager_latency_microseconds",
"kubelet_docker_errors",
var interestingKubeletMetrics = []string{
"kubelet_docker_operations_errors_total",
"kubelet_docker_operations_duration_seconds",
"kubelet_generate_pod_status_latency_microseconds",
"kubelet_pod_start_duration_seconds",
"kubelet_pod_worker_duration_seconds",
"kubelet_pod_worker_start_duration_seconds",
"kubelet_sync_pods_latency_microseconds",
}
var InterestingClusterAutoscalerMetrics = []string{
var interestingClusterAutoscalerMetrics = []string{
"function_duration_seconds",
"errors_total",
"evicted_pods_total",
}
// Dashboard metrics
// LatencyMetric is a struct for dashboard metrics.
type LatencyMetric struct {
Perc50 time.Duration `json:"Perc50"`
Perc90 time.Duration `json:"Perc90"`
@ -208,6 +207,7 @@ type LatencyMetric struct {
Perc100 time.Duration `json:"Perc100"`
}
// PodStartupLatency is a struct for managing latency of pod startup.
type PodStartupLatency struct {
CreateToScheduleLatency LatencyMetric `json:"createToScheduleLatency"`
ScheduleToRunLatency LatencyMetric `json:"scheduleToRunLatency"`
@ -216,18 +216,22 @@ type PodStartupLatency struct {
E2ELatency LatencyMetric `json:"e2eLatency"`
}
// SummaryKind returns the kind of the pod startup latency summary.
func (l *PodStartupLatency) SummaryKind() string {
return "PodStartupLatency"
}
// PrintHumanReadable returns pod startup latency in a human-readable format.
func (l *PodStartupLatency) PrintHumanReadable() string {
return PrettyPrintJSON(l)
}
// PrintJSON returns pod startup latency in JSON format.
func (l *PodStartupLatency) PrintJSON() string {
return PrettyPrintJSON(PodStartupLatencyToPerfData(l))
}
// SchedulingMetrics is a struct for managing scheduling metrics.
type SchedulingMetrics struct {
PredicateEvaluationLatency LatencyMetric `json:"predicateEvaluationLatency"`
PriorityEvaluationLatency LatencyMetric `json:"priorityEvaluationLatency"`
@ -239,23 +243,28 @@ type SchedulingMetrics struct {
ThroughputPerc99 float64 `json:"throughputPerc99"`
}
// SummaryKind returns the summary of scheduling metrics.
func (l *SchedulingMetrics) SummaryKind() string {
return "SchedulingMetrics"
}
// PrintHumanReadable returns scheduling metrics in a human-readable format.
func (l *SchedulingMetrics) PrintHumanReadable() string {
return PrettyPrintJSON(l)
}
// PrintJSON returns scheduling metrics in JSON format.
func (l *SchedulingMetrics) PrintJSON() string {
return PrettyPrintJSON(l)
}
// Histogram is a struct for managing a histogram.
type Histogram struct {
Labels map[string]string `json:"labels"`
Buckets map[string]int `json:"buckets"`
}
// HistogramVec is an array of Histogram.
type HistogramVec []Histogram
func newHistogram(labels map[string]string) *Histogram {
@ -265,6 +274,7 @@ func newHistogram(labels map[string]string) *Histogram {
}
}
// EtcdMetrics is a struct for managing etcd metrics.
type EtcdMetrics struct {
BackendCommitDuration HistogramVec `json:"backendCommitDuration"`
SnapshotSaveTotalDuration HistogramVec `json:"snapshotSaveTotalDuration"`
@ -282,24 +292,29 @@ func newEtcdMetrics() *EtcdMetrics {
}
}
// SummaryKind returns the kind of the etcd metrics summary.
func (l *EtcdMetrics) SummaryKind() string {
return "EtcdMetrics"
}
// PrintHumanReadable returns etcd metrics in a human-readable format.
func (l *EtcdMetrics) PrintHumanReadable() string {
return PrettyPrintJSON(l)
}
// PrintJSON returns etcd metrics in JSON format.
func (l *EtcdMetrics) PrintJSON() string {
return PrettyPrintJSON(l)
}
// EtcdMetricsCollector is a struct for managing an etcd metrics collector.
type EtcdMetricsCollector struct {
stopCh chan struct{}
wg *sync.WaitGroup
metrics *EtcdMetrics
}
// NewEtcdMetricsCollector creates a new etcd metrics collector.
func NewEtcdMetricsCollector() *EtcdMetricsCollector {
return &EtcdMetricsCollector{
stopCh: make(chan struct{}),
@ -311,12 +326,12 @@ func NewEtcdMetricsCollector() *EtcdMetricsCollector {
func getEtcdMetrics() ([]*model.Sample, error) {
// Etcd is only exposed on localhost. We use the SSH method to reach it.
if TestContext.Provider == "gke" || TestContext.Provider == "eks" {
Logf("Not grabbing etcd metrics through master SSH: unsupported for %s", TestContext.Provider)
e2elog.Logf("Not grabbing etcd metrics through master SSH: unsupported for %s", TestContext.Provider)
return nil, nil
}
cmd := "curl http://localhost:2379/metrics"
sshResult, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
sshResult, err := e2essh.SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
if err != nil || sshResult.Code != 0 {
return nil, fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
}
@ -349,7 +364,7 @@ func (mc *EtcdMetricsCollector) StartCollecting(interval time.Duration) {
case <-time.After(interval):
dbSize, err := getEtcdDatabaseSize()
if err != nil {
Logf("Failed to collect etcd database size")
e2elog.Logf("Failed to collect etcd database size")
continue
}
mc.metrics.MaxDatabaseSize = math.Max(mc.metrics.MaxDatabaseSize, dbSize)
@ -360,6 +375,7 @@ func (mc *EtcdMetricsCollector) StartCollecting(interval time.Duration) {
}()
}
// StopAndSummarize stops etcd metrics collector and summarizes the metrics.
func (mc *EtcdMetricsCollector) StopAndSummarize() error {
close(mc.stopCh)
mc.wg.Wait()
@ -384,17 +400,12 @@ func (mc *EtcdMetricsCollector) StopAndSummarize() error {
return nil
}
// GetMetrics returns the metrics gathered by the etcd metrics collector.
func (mc *EtcdMetricsCollector) GetMetrics() *EtcdMetrics {
return mc.metrics
}
type SaturationTime struct {
TimeToSaturate time.Duration `json:"timeToSaturate"`
NumberOfNodes int `json:"numberOfNodes"`
NumberOfPods int `json:"numberOfPods"`
Throughput float32 `json:"throughput"`
}
// APICall is a struct for managing an API call.
type APICall struct {
Resource string `json:"resource"`
Subresource string `json:"subresource"`
@ -404,20 +415,24 @@ type APICall struct {
Count int `json:"count"`
}
// APIResponsiveness is a struct for managing multiple API calls.
type APIResponsiveness struct {
APICalls []APICall `json:"apicalls"`
}
// SummaryKind returns the kind of the API responsiveness summary.
func (a *APIResponsiveness) SummaryKind() string {
return "APIResponsiveness"
}
// PrintHumanReadable returns metrics in a human-readable format.
func (a *APIResponsiveness) PrintHumanReadable() string {
return PrettyPrintJSON(a)
}
// PrintJSON returns metrics of PerfData (50th, 90th and 99th percentiles) in JSON format.
func (a *APIResponsiveness) PrintJSON() string {
return PrettyPrintJSON(ApiCallToPerfData(a))
return PrettyPrintJSON(APICallToPerfData(a))
}
func (a *APIResponsiveness) Len() int { return len(a.APICalls) }
@ -525,7 +540,7 @@ func readLatencyMetrics(c clientset.Interface) (*APIResponsiveness, error) {
return &a, err
}
// Prints top five summary metrics for request types with latency and returns
// HighLatencyRequests prints top five summary metrics for request types with latency and returns
// number of such request types above threshold. We use a higher threshold for
// list calls if nodeCount is above a given threshold (i.e. cluster is big).
func HighLatencyRequests(c clientset.Interface, nodeCount int) (int, *APIResponsiveness, error) {
@ -559,13 +574,13 @@ func HighLatencyRequests(c clientset.Interface, nodeCount int) (int, *APIRespons
if isBad {
prefix = "WARNING "
}
Logf("%vTop latency metric: %+v", prefix, metrics.APICalls[i])
e2elog.Logf("%vTop latency metric: %+v", prefix, metrics.APICalls[i])
}
}
return badMetrics, metrics, nil
}
// Verifies whether 50, 90 and 99th percentiles of a latency metric are
// VerifyLatencyWithinThreshold verifies whether 50, 90 and 99th percentiles of a latency metric are
// within the expected threshold.
func VerifyLatencyWithinThreshold(threshold, actual LatencyMetric, metricName string) error {
if actual.Perc50 > threshold.Perc50 {
@ -580,9 +595,9 @@ func VerifyLatencyWithinThreshold(threshold, actual LatencyMetric, metricName st
return nil
}
// Resets latency metrics in apiserver.
// ResetMetrics resets latency metrics in apiserver.
func ResetMetrics(c clientset.Interface) error {
Logf("Resetting latency metrics in apiserver...")
e2elog.Logf("Resetting latency metrics in apiserver...")
body, err := c.CoreV1().RESTClient().Delete().AbsPath("/metrics").DoRaw()
if err != nil {
return err
@ -638,12 +653,12 @@ func sendRestRequestToScheduler(c clientset.Interface, op string) (string, error
} else {
// If master is not registered fall back to old method of using SSH.
if TestContext.Provider == "gke" || TestContext.Provider == "eks" {
Logf("Not grabbing scheduler metrics through master SSH: unsupported for %s", TestContext.Provider)
e2elog.Logf("Not grabbing scheduler metrics through master SSH: unsupported for %s", TestContext.Provider)
return "", nil
}
cmd := "curl -X " + opUpper + " http://localhost:10251/metrics"
sshResult, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
sshResult, err := e2essh.SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
if err != nil || sshResult.Code != 0 {
return "", fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
}
@ -666,11 +681,11 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingMetrics, error) {
}
for _, sample := range samples {
if sample.Metric[model.MetricNameLabel] != SchedulingLatencyMetricName {
if sample.Metric[model.MetricNameLabel] != schedulingLatencyMetricName {
continue
}
var metric *LatencyMetric = nil
var metric *LatencyMetric
switch sample.Metric[schedulermetric.OperationLabel] {
case schedulermetric.PredicateEvaluation:
metric = &result.PredicateEvaluationLatency
@ -694,7 +709,7 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingMetrics, error) {
return &result, nil
}
// Verifies (currently just by logging them) the scheduling latencies.
// VerifySchedulerLatency verifies (currently just by logging them) the scheduling latencies.
func VerifySchedulerLatency(c clientset.Interface) (*SchedulingMetrics, error) {
latency, err := getSchedulingLatency(c)
if err != nil {
@ -703,6 +718,7 @@ func VerifySchedulerLatency(c clientset.Interface) (*SchedulingMetrics, error) {
return latency, nil
}
// ResetSchedulerMetrics sends a DELETE request to kube-scheduler for resetting metrics.
func ResetSchedulerMetrics(c clientset.Interface) error {
responseText, err := sendRestRequestToScheduler(c, "DELETE")
if err != nil {
@ -732,15 +748,16 @@ func convertSampleToBucket(sample *model.Sample, h *HistogramVec) {
hist.Buckets[string(sample.Metric["le"])] = int(sample.Value)
}
// PrettyPrintJSON converts metrics to JSON format.
func PrettyPrintJSON(metrics interface{}) string {
output := &bytes.Buffer{}
if err := json.NewEncoder(output).Encode(metrics); err != nil {
Logf("Error building encoder: %v", err)
e2elog.Logf("Error building encoder: %v", err)
return ""
}
formatted := &bytes.Buffer{}
if err := json.Indent(formatted, output.Bytes(), "", " "); err != nil {
Logf("Error indenting: %v", err)
e2elog.Logf("Error indenting: %v", err)
return ""
}
return string(formatted.Bytes())
@ -778,12 +795,14 @@ type PodLatencyData struct {
Latency time.Duration
}
// LatencySlice is an array of PodLatencyData which encapsulates pod startup latency information.
type LatencySlice []PodLatencyData
func (a LatencySlice) Len() int { return len(a) }
func (a LatencySlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a LatencySlice) Less(i, j int) bool { return a[i].Latency < a[j].Latency }
// ExtractLatencyMetrics returns latency metrics for each percentile (50th, 90th and 99th).
func ExtractLatencyMetrics(latencies []PodLatencyData) LatencyMetric {
length := len(latencies)
perc50 := latencies[int(math.Ceil(float64(length*50)/100))-1].Latency
@ -801,17 +820,18 @@ func LogSuspiciousLatency(latencyData []PodLatencyData, latencyDataLag []PodLate
}
for _, l := range latencyData {
if l.Latency > NodeStartupThreshold {
HighLatencyKubeletOperations(c, 1*time.Second, l.Node, Logf)
HighLatencyKubeletOperations(c, 1*time.Second, l.Node, e2elog.Logf)
}
}
Logf("Approx throughput: %v pods/min",
e2elog.Logf("Approx throughput: %v pods/min",
float64(nodeCount)/(latencyDataLag[len(latencyDataLag)-1].Latency.Minutes()))
}
// PrintLatencies outputs latencies to the log in a readable format.
func PrintLatencies(latencies []PodLatencyData, header string) {
metrics := ExtractLatencyMetrics(latencies)
Logf("10%% %s: %v", header, latencies[(len(latencies)*9)/10:])
Logf("perc50: %v, perc90: %v, perc99: %v", metrics.Perc50, metrics.Perc90, metrics.Perc99)
e2elog.Logf("10%% %s: %v", header, latencies[(len(latencies)*9)/10:])
e2elog.Logf("perc50: %v, perc90: %v, perc99: %v", metrics.Perc50, metrics.Perc90, metrics.Perc99)
}
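// Illustrative usage sketch: ExtractLatencyMetrics selects percentiles by
// rank, so the slice must be sorted first; PrettyPrintJSON then renders the
// result for the logs (assumes this file's existing sort import).
func exampleLatencyReport(data []PodLatencyData) {
	sort.Sort(LatencySlice(data))
	PrintLatencies(data, "pod startup latency")
	e2elog.Logf("as JSON:\n%s", PrettyPrintJSON(ExtractLatencyMetrics(data)))
}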
func (m *MetricsForE2E) computeClusterAutoscalerMetricsDelta(before metrics.Collection) {

View File

@ -26,7 +26,7 @@ import (
"strings"
"time"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@ -38,15 +38,20 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
EndpointHttpPort = 8080
EndpointUdpPort = 8081
TestContainerHttpPort = 8080
ClusterHttpPort = 80
ClusterUdpPort = 90
// EndpointHTTPPort is an endpoint HTTP port for testing.
EndpointHTTPPort = 8080
// EndpointUDPPort is an endpoint UDP port for testing.
EndpointUDPPort = 8081
testContainerHTTPPort = 8080
// ClusterHTTPPort is a cluster HTTP port for testing.
ClusterHTTPPort = 80
// ClusterUDPPort is a cluster UDP port for testing.
ClusterUDPPort = 90
testPodName = "test-container-pod"
hostTestPodName = "host-test-container-pod"
nodePortServiceName = "node-port-service"
@ -59,30 +64,34 @@ const (
testTries = 30
// Maximum number of pods in a test, to make test work in large clusters.
maxNetProxyPodsCount = 10
// Number of checks to hit a given set of endpoints when enable session affinity.
// SessionAffinityChecks is the number of checks to hit a given set of endpoints when session affinity is enabled.
SessionAffinityChecks = 10
// RegexIPv4 is a regex to match IPv4 addresses
RegexIPv4 = "(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)"
// RegexIPv6 is a regex to match IPv6 addresses
RegexIPv6 = "(?:(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){6})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:::(?:(?:(?:[0-9a-fA-F]{1,4})):){5})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:(?:[0-9a-fA-F]{1,4})):){4})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,1}(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:(?:[0-9a-fA-F]{1,4})):){3})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,2}(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:(?:[0-9a-fA-F]{1,4})):){2})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,3}(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:[0-9a-fA-F]{1,4})):)(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,4}(?:(?:[0-9a-fA-F]{1,4})))?::)(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,5}(?:(?:[0-9a-fA-F]{1,4})))?::)(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,6}(?:(?:[0-9a-fA-F]{1,4})))?::))))"
)
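A minimal usage sketch of the IPv4 matcher (not part of this change), assuming this package is imported as framework; the helper name and input are hypothetical:
import (
	"regexp"

	"k8s.io/kubernetes/test/e2e/framework"
)

// firstIPv4 returns the first dotted-quad substring of s, or "" if none.
func firstIPv4(s string) string {
	return regexp.MustCompile(framework.RegexIPv4).FindString(s)
}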
var NetexecImageName = imageutils.GetE2EImage(imageutils.Netexec)
var netexecImageName = imageutils.GetE2EImage(imageutils.Netexec)
// NewNetworkingTestConfig creates and sets up a new test config helper.
func NewNetworkingTestConfig(f *Framework) *NetworkingTestConfig {
config := &NetworkingTestConfig{f: f, Namespace: f.Namespace.Name, HostNetwork: true}
By(fmt.Sprintf("Performing setup for networking test in namespace %v", config.Namespace))
ginkgo.By(fmt.Sprintf("Performing setup for networking test in namespace %v", config.Namespace))
config.setup(getServiceSelector())
return config
}
// NewNetworkingTestNodeE2EConfig creates and sets up a new test config helper for Node E2E.
// NewCoreNetworkingTestConfig creates and sets up a new test config helper for Node E2E.
func NewCoreNetworkingTestConfig(f *Framework, hostNetwork bool) *NetworkingTestConfig {
config := &NetworkingTestConfig{f: f, Namespace: f.Namespace.Name, HostNetwork: hostNetwork}
By(fmt.Sprintf("Performing setup for networking test in namespace %v", config.Namespace))
ginkgo.By(fmt.Sprintf("Performing setup for networking test in namespace %v", config.Namespace))
config.setupCore(getServiceSelector())
return config
}
func getServiceSelector() map[string]string {
By("creating a selector")
ginkgo.By("creating a selector")
selectorName := "selector-" + string(uuid.NewUUID())
serviceSelector := map[string]string{
selectorName: "true",
@ -124,19 +133,21 @@ type NetworkingTestConfig struct {
// External ip of first node for use in nodePort testing.
NodeIP string
// The http/udp nodePorts of the Service.
NodeHttpPort int
NodeUdpPort int
NodeHTTPPort int
NodeUDPPort int
// The kubernetes namespace within which all resources for this
// config are created
Namespace string
}
// DialFromEndpointContainer executes a curl via kubectl exec in an endpoint container.
func (config *NetworkingTestConfig) DialFromEndpointContainer(protocol, targetIP string, targetPort, maxTries, minTries int, expectedEps sets.String) {
config.DialFromContainer(protocol, config.EndpointPods[0].Status.PodIP, targetIP, EndpointHttpPort, targetPort, maxTries, minTries, expectedEps)
config.DialFromContainer(protocol, config.EndpointPods[0].Status.PodIP, targetIP, EndpointHTTPPort, targetPort, maxTries, minTries, expectedEps)
}
// DialFromTestContainer executes a curl via kubectl exec in a test container.
func (config *NetworkingTestConfig) DialFromTestContainer(protocol, targetIP string, targetPort, maxTries, minTries int, expectedEps sets.String) {
config.DialFromContainer(protocol, config.TestContainerPod.Status.PodIP, targetIP, TestContainerHttpPort, targetPort, maxTries, minTries, expectedEps)
config.DialFromContainer(protocol, config.TestContainerPod.Status.PodIP, targetIP, testContainerHTTPPort, targetPort, maxTries, minTries, expectedEps)
}
// diagnoseMissingEndpoints prints debug information about the endpoints that
@ -147,10 +158,10 @@ func (config *NetworkingTestConfig) diagnoseMissingEndpoints(foundEndpoints sets
if foundEndpoints.Has(e.Name) {
continue
}
Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name)
e2elog.Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name)
desc, _ := RunKubectl(
"describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace))
Logf(desc)
e2elog.Logf(desc)
}
}
@ -163,7 +174,7 @@ func (config *NetworkingTestConfig) EndpointHostnames() sets.String {
return expectedEps
}
// DialFromContainers executes a curl via kubectl exec in a test container,
// DialFromContainer executes a curl via kubectl exec in a test container,
// which might then translate to a tcp or udp request based on the protocol
// argument in the url.
// - minTries is the minimum number of curl attempts required before declaring
@ -176,8 +187,8 @@ func (config *NetworkingTestConfig) EndpointHostnames() sets.String {
// maxTries == minTries will confirm that we see the expected endpoints and no
// more for maxTries. Use this if you want to eg: fail a readiness check on a
// pod and confirm it doesn't show up as an endpoint.
func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, targetIP string, containerHttpPort, targetPort, maxTries, minTries int, expectedEps sets.String) {
ipPort := net.JoinHostPort(containerIP, strconv.Itoa(containerHttpPort))
func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, targetIP string, containerHTTPPort, targetPort, maxTries, minTries int, expectedEps sets.String) {
ipPort := net.JoinHostPort(containerIP, strconv.Itoa(containerHTTPPort))
// The current versions of curl included in CentOS and RHEL distros
// misinterpret square brackets around IPv6 as globbing, so use the -g
// argument to disable globbing to handle the IPv6 case.
@ -195,11 +206,11 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, tar
// A failure to kubectl exec counts as a try, not a hard fail.
// Also note that we will keep failing for maxTries in tests where
// we confirm unreachability.
Logf("Failed to execute %q: %v, stdout: %q, stderr %q", cmd, err, stdout, stderr)
e2elog.Logf("Failed to execute %q: %v, stdout: %q, stderr %q", cmd, err, stdout, stderr)
} else {
var output map[string][]string
if err := json.Unmarshal([]byte(stdout), &output); err != nil {
Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
e2elog.Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
cmd, config.HostTestContainerPod.Name, stdout, err)
continue
}
@ -211,7 +222,7 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, tar
}
}
}
Logf("Waiting for endpoints: %v", expectedEps.Difference(eps))
e2elog.Logf("Waiting for endpoints: %v", expectedEps.Difference(eps))
// Check against i+1 so we exit if minTries == maxTries.
if (eps.Equal(expectedEps) || eps.Len() == 0 && expectedEps.Len() == 0) && i+1 >= minTries {
@ -225,8 +236,9 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, tar
Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
}
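A sketch of a caller, assuming a suite-provided *Framework f; the ClusterIP field and the tries values are assumptions for illustration:
import "k8s.io/kubernetes/test/e2e/framework"

func checkClusterHTTP(f *framework.Framework) {
	config := framework.NewNetworkingTestConfig(f)
	expected := config.EndpointHostnames()
	// maxTries == minTries (30 here) asserts that exactly the expected
	// endpoints answer for the whole window and nothing extra shows up.
	config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHTTPPort, 30, 30, expected)
}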
// GetEndpointsFromTestContainer executes a curl via kubectl exec in a test container.
func (config *NetworkingTestConfig) GetEndpointsFromTestContainer(protocol, targetIP string, targetPort, tries int) (sets.String, error) {
return config.GetEndpointsFromContainer(protocol, config.TestContainerPod.Status.PodIP, targetIP, TestContainerHttpPort, targetPort, tries)
return config.GetEndpointsFromContainer(protocol, config.TestContainerPod.Status.PodIP, targetIP, testContainerHTTPPort, targetPort, tries)
}
// GetEndpointsFromContainer executes a curl via kubectl exec in a test container,
@ -234,8 +246,8 @@ func (config *NetworkingTestConfig) GetEndpointsFromTestContainer(protocol, targ
// in the url. It returns all different endpoints from multiple retries.
// - tries is the number of curl attempts. If this many attempts pass and
// we don't see any endpoints, the test fails.
func (config *NetworkingTestConfig) GetEndpointsFromContainer(protocol, containerIP, targetIP string, containerHttpPort, targetPort, tries int) (sets.String, error) {
ipPort := net.JoinHostPort(containerIP, strconv.Itoa(containerHttpPort))
func (config *NetworkingTestConfig) GetEndpointsFromContainer(protocol, containerIP, targetIP string, containerHTTPPort, targetPort, tries int) (sets.String, error) {
ipPort := net.JoinHostPort(containerIP, strconv.Itoa(containerHTTPPort))
// The current versions of curl included in CentOS and RHEL distros
// misinterpret square brackets around IPv6 as globbing, so use the -g
// argument to disable globbing to handle the IPv6 case.
@ -253,12 +265,12 @@ func (config *NetworkingTestConfig) GetEndpointsFromContainer(protocol, containe
// A failure to kubectl exec counts as a try, not a hard fail.
// Also note that we will keep failing for maxTries in tests where
// we confirm unreachability.
Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", cmd, err, stdout, stderr)
e2elog.Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", cmd, err, stdout, stderr)
} else {
Logf("Tries: %d, in try: %d, stdout: %v, stderr: %v, command run in: %#v", tries, i, stdout, stderr, config.HostTestContainerPod)
e2elog.Logf("Tries: %d, in try: %d, stdout: %v, stderr: %v, command run in: %#v", tries, i, stdout, stderr, config.HostTestContainerPod)
var output map[string][]string
if err := json.Unmarshal([]byte(stdout), &output); err != nil {
Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
e2elog.Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
cmd, config.HostTestContainerPod.Name, stdout, err)
continue
}
@ -312,7 +324,7 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ
// A failure to exec command counts as a try, not a hard fail.
// Also note that we will keep failing for maxTries in tests where
// we confirm unreachability.
Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", filterCmd, err, stdout, stderr)
e2elog.Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", filterCmd, err, stdout, stderr)
} else {
trimmed := strings.TrimSpace(stdout)
if trimmed != "" {
@ -322,11 +334,11 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ
// Check against i+1 so we exit if minTries == maxTries.
if eps.Equal(expectedEps) && i+1 >= minTries {
Logf("Found all expected endpoints: %+v", eps.List())
e2elog.Logf("Found all expected endpoints: %+v", eps.List())
return
}
Logf("Waiting for %+v endpoints (expected=%+v, actual=%+v)", expectedEps.Difference(eps).List(), expectedEps.List(), eps.List())
e2elog.Logf("Waiting for %+v endpoints (expected=%+v, actual=%+v)", expectedEps.Difference(eps).List(), expectedEps.List(), eps.List())
// TODO: get rid of this delay #36281
time.Sleep(hitEndpointRetryDelay)
@ -341,17 +353,17 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ
// doesn't match the expected string.
func (config *NetworkingTestConfig) GetSelfURL(port int32, path string, expected string) {
cmd := fmt.Sprintf("curl -i -q -s --connect-timeout 1 http://localhost:%d%s", port, path)
By(fmt.Sprintf("Getting kube-proxy self URL %s", path))
ginkgo.By(fmt.Sprintf("Getting kube-proxy self URL %s", path))
config.executeCurlCmd(cmd, expected)
}
// GetSelfStatusCode executes a curl against the given path via kubectl exec into a
// GetSelfURLStatusCode executes a curl against the given path via kubectl exec into a
// test container running with host networking, and fails if the returned status
// code doesn't match the expected string.
func (config *NetworkingTestConfig) GetSelfURLStatusCode(port int32, path string, expected string) {
// check status code
cmd := fmt.Sprintf("curl -o /dev/null -i -q -s -w %%{http_code} --connect-timeout 1 http://localhost:%d%s", port, path)
By(fmt.Sprintf("Checking status code against http://localhost:%d%s", port, path))
ginkgo.By(fmt.Sprintf("Checking status code against http://localhost:%d%s", port, path))
config.executeCurlCmd(cmd, expected)
}
@ -366,20 +378,20 @@ func (config *NetworkingTestConfig) executeCurlCmd(cmd string, expected string)
stdout, err := RunHostCmd(config.Namespace, podName, cmd)
if err != nil {
msg = fmt.Sprintf("failed executing cmd %v in %v/%v: %v", cmd, config.Namespace, podName, err)
Logf(msg)
e2elog.Logf(msg)
return false, nil
}
if !strings.Contains(stdout, expected) {
msg = fmt.Sprintf("successfully executed %v in %v/%v, but output '%v' doesn't contain expected string '%v'", cmd, config.Namespace, podName, stdout, expected)
Logf(msg)
e2elog.Logf(msg)
return false, nil
}
return true, nil
}); pollErr != nil {
Logf("\nOutput of kubectl describe pod %v/%v:\n", config.Namespace, podName)
e2elog.Logf("\nOutput of kubectl describe pod %v/%v:\n", config.Namespace, podName)
desc, _ := RunKubectl(
"describe", "pod", podName, fmt.Sprintf("--namespace=%v", config.Namespace))
Logf("%s", desc)
e2elog.Logf("%s", desc)
Failf("Timed out in %v: %v", retryTimeout, msg)
}
}
@ -394,7 +406,7 @@ func (config *NetworkingTestConfig) createNetShellPodSpec(podName, hostname stri
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/healthz",
Port: intstr.IntOrString{IntVal: EndpointHttpPort},
Port: intstr.IntOrString{IntVal: EndpointHTTPPort},
},
},
}
@ -411,21 +423,21 @@ func (config *NetworkingTestConfig) createNetShellPodSpec(podName, hostname stri
Containers: []v1.Container{
{
Name: "webserver",
Image: NetexecImageName,
Image: netexecImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"/netexec",
fmt.Sprintf("--http-port=%d", EndpointHttpPort),
fmt.Sprintf("--udp-port=%d", EndpointUdpPort),
fmt.Sprintf("--http-port=%d", EndpointHTTPPort),
fmt.Sprintf("--udp-port=%d", EndpointUDPPort),
},
Ports: []v1.ContainerPort{
{
Name: "http",
ContainerPort: EndpointHttpPort,
ContainerPort: EndpointHTTPPort,
},
{
Name: "udp",
ContainerPort: EndpointUdpPort,
ContainerPort: EndpointUDPPort,
Protocol: v1.ProtocolUDP,
},
},
@ -455,17 +467,17 @@ func (config *NetworkingTestConfig) createTestPodSpec() *v1.Pod {
Containers: []v1.Container{
{
Name: "webserver",
Image: NetexecImageName,
Image: netexecImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"/netexec",
fmt.Sprintf("--http-port=%d", EndpointHttpPort),
fmt.Sprintf("--udp-port=%d", EndpointUdpPort),
fmt.Sprintf("--http-port=%d", EndpointHTTPPort),
fmt.Sprintf("--udp-port=%d", EndpointUDPPort),
},
Ports: []v1.ContainerPort{
{
Name: "http",
ContainerPort: TestContainerHttpPort,
ContainerPort: testContainerHTTPPort,
},
},
},
@ -487,8 +499,8 @@ func (config *NetworkingTestConfig) createNodePortServiceSpec(svcName string, se
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{
{Port: ClusterHttpPort, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(EndpointHttpPort)},
{Port: ClusterUdpPort, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt(EndpointUdpPort)},
{Port: ClusterHTTPPort, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(EndpointHTTPPort)},
{Port: ClusterUDPPort, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt(EndpointUDPPort)},
},
Selector: selector,
SessionAffinity: sessionAffinity,
@ -504,6 +516,7 @@ func (config *NetworkingTestConfig) createSessionAffinityService(selector map[st
config.SessionAffinityService = config.createService(config.createNodePortServiceSpec(sessionAffinityServiceName, selector, true))
}
// DeleteNodePortService deletes NodePort service.
func (config *NetworkingTestConfig) DeleteNodePortService() {
err := config.getServiceClient().Delete(config.NodePortService.Name, nil)
ExpectNoError(err, "error while deleting NodePortService. err:%v)", err)
@ -548,11 +561,11 @@ func (config *NetworkingTestConfig) createService(serviceSpec *v1.Service) *v1.S
// setupCore sets up the pods and core test config
// mainly for simplified node e2e setup
func (config *NetworkingTestConfig) setupCore(selector map[string]string) {
By("Creating the service pods in kubernetes")
ginkgo.By("Creating the service pods in kubernetes")
podName := "netserver"
config.EndpointPods = config.createNetProxyPods(podName, selector)
By("Creating test pods")
ginkgo.By("Creating test pods")
config.createTestPods()
epCount := len(config.EndpointPods)
@ -563,7 +576,7 @@ func (config *NetworkingTestConfig) setupCore(selector map[string]string) {
func (config *NetworkingTestConfig) setup(selector map[string]string) {
config.setupCore(selector)
By("Getting node addresses")
ginkgo.By("Getting node addresses")
ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
config.ExternalAddrs = NodeAddresses(nodeList, v1.NodeExternalIP)
@ -571,16 +584,16 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
SkipUnlessNodeCountIsAtLeast(2)
config.Nodes = nodeList.Items
By("Creating the service on top of the pods in kubernetes")
ginkgo.By("Creating the service on top of the pods in kubernetes")
config.createNodePortService(selector)
config.createSessionAffinityService(selector)
for _, p := range config.NodePortService.Spec.Ports {
switch p.Protocol {
case v1.ProtocolUDP:
config.NodeUdpPort = int(p.NodePort)
config.NodeUDPPort = int(p.NodePort)
case v1.ProtocolTCP:
config.NodeHttpPort = int(p.NodePort)
config.NodeHTTPPort = int(p.NodePort)
default:
continue
}
@ -652,6 +665,7 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector
return runningPods
}
// DeleteNetProxyPod deletes the first endpoint pod and waits for it being removed.
func (config *NetworkingTestConfig) DeleteNetProxyPod() {
pod := config.EndpointPods[0]
config.getPodClient().Delete(pod.Name, metav1.NewDeleteOptions(0))
@ -689,17 +703,18 @@ func (config *NetworkingTestConfig) getNamespacesClient() coreclientset.Namespac
return config.f.ClientSet.CoreV1().Namespaces()
}
// CheckReachabilityFromPod checks reachability from the specified pod.
func CheckReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, namespace, pod, target string) {
cmd := fmt.Sprintf("wget -T 5 -qO- %q", target)
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
_, err := RunHostCmd(namespace, pod, cmd)
if expectToBeReachable && err != nil {
Logf("Expect target to be reachable. But got err: %v. Retry until timeout", err)
e2elog.Logf("Expect target to be reachable. But got err: %v. Retry until timeout", err)
return false, nil
}
if !expectToBeReachable && err == nil {
Logf("Expect target NOT to be reachable. But it is reachable. Retry until timeout")
e2elog.Logf("Expect target NOT to be reachable. But it is reachable. Retry until timeout")
return false, nil
}
return true, nil
@ -707,6 +722,7 @@ func CheckReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, n
ExpectNoError(err)
}
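A short sketch of the helper above; the negative form polls until every probe fails, and the namespace, pod, and target values are placeholders:
import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
)

func expectUnreachable(ns, pod, target string) {
	// Polls until the target stops being reachable from the pod; the
	// test fails if it is still reachable when the timeout expires.
	framework.CheckReachabilityFromPod(false, 2*time.Minute, ns, pod, target)
}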
// HTTPPokeParams is a struct for HTTP poke parameters.
type HTTPPokeParams struct {
Timeout time.Duration
ExpectCode int // default = 200
@ -714,6 +730,7 @@ type HTTPPokeParams struct {
RetriableCodes []int
}
// HTTPPokeResult is a struct for HTTP poke result.
type HTTPPokeResult struct {
Status HTTPPokeStatus
Code int // HTTP code: 0 if the connection was not made
@ -721,17 +738,25 @@ type HTTPPokeResult struct {
Body []byte // if code != 0
}
// HTTPPokeStatus is a string type representing the status of an HTTP poke.
type HTTPPokeStatus string
const (
// HTTPSuccess is the HTTP poke status reported on success.
HTTPSuccess HTTPPokeStatus = "Success"
HTTPError HTTPPokeStatus = "UnknownError"
// Any time we add new errors, we should audit all callers of this.
HTTPTimeout HTTPPokeStatus = "TimedOut"
HTTPRefused HTTPPokeStatus = "ConnectionRefused"
HTTPRetryCode HTTPPokeStatus = "RetryCode"
HTTPWrongCode HTTPPokeStatus = "WrongCode"
// HTTPError is the HTTP poke status for an unknown error.
HTTPError HTTPPokeStatus = "UnknownError"
// HTTPTimeout is the HTTP poke status for a timed-out request.
HTTPTimeout HTTPPokeStatus = "TimedOut"
// HTTPRefused is the HTTP poke status for a refused connection.
HTTPRefused HTTPPokeStatus = "ConnectionRefused"
// HTTPRetryCode is the HTTP poke status for a retriable response code.
HTTPRetryCode HTTPPokeStatus = "RetryCode"
// HTTPWrongCode is the HTTP poke status for an unexpected response code.
HTTPWrongCode HTTPPokeStatus = "WrongCode"
// HTTPBadResponse is the HTTP poke status for an unexpected response body.
HTTPBadResponse HTTPPokeStatus = "BadResponse"
// Any time we add new errors, we should audit all callers of this.
)
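A hedged sketch of a caller, using only the params and result fields shown in this file; the host, port, path, and expected body are placeholders:
import (
	"net/http"
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
)

func pokeOnce(host string, port int) {
	params := &framework.HTTPPokeParams{
		Timeout:        5 * time.Second,
		ExpectCode:     http.StatusOK,
		BodyContains:   "hostname",
		RetriableCodes: []int{http.StatusServiceUnavailable},
	}
	ret := framework.PokeHTTP(host, port, "/hostname", params)
	if ret.Status != framework.HTTPSuccess {
		// ret.Code and ret.Error say whether this was a timeout,
		// refused connection, wrong code, or bad body.
		framework.Failf("poke failed: %v (code %d)", ret.Error, ret.Code)
	}
}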
// PokeHTTP tries to connect to a host on a port for a given URL path. Callers
@ -773,7 +798,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
params.ExpectCode = http.StatusOK
}
Logf("Poking %q", url)
e2elog.Logf("Poking %q", url)
resp, err := httpGetNoConnectionPoolTimeout(url, params.Timeout)
if err != nil {
@ -786,7 +811,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
} else {
ret.Status = HTTPError
}
Logf("Poke(%q): %v", url, err)
e2elog.Logf("Poke(%q): %v", url, err)
return ret
}
@ -797,7 +822,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
if err != nil {
ret.Status = HTTPError
ret.Error = fmt.Errorf("error reading HTTP body: %v", err)
Logf("Poke(%q): %v", url, ret.Error)
e2elog.Logf("Poke(%q): %v", url, ret.Error)
return ret
}
ret.Body = make([]byte, len(body))
@ -808,25 +833,25 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
if resp.StatusCode == code {
ret.Error = fmt.Errorf("retriable status code: %d", resp.StatusCode)
ret.Status = HTTPRetryCode
Logf("Poke(%q): %v", url, ret.Error)
e2elog.Logf("Poke(%q): %v", url, ret.Error)
return ret
}
}
ret.Status = HTTPWrongCode
ret.Error = fmt.Errorf("bad status code: %d", resp.StatusCode)
Logf("Poke(%q): %v", url, ret.Error)
e2elog.Logf("Poke(%q): %v", url, ret.Error)
return ret
}
if params.BodyContains != "" && !strings.Contains(string(body), params.BodyContains) {
ret.Status = HTTPBadResponse
ret.Error = fmt.Errorf("response does not contain expected substring: %q", string(body))
Logf("Poke(%q): %v", url, ret.Error)
e2elog.Logf("Poke(%q): %v", url, ret.Error)
return ret
}
ret.Status = HTTPSuccess
Logf("Poke(%q): success", url)
e2elog.Logf("Poke(%q): success", url)
return ret
}
@ -844,26 +869,34 @@ func httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Re
return client.Get(url)
}
// UDPPokeParams is a struct for UDP poke parameters.
type UDPPokeParams struct {
Timeout time.Duration
Response string
}
// UDPPokeResult is a struct for UDP poke result.
type UDPPokeResult struct {
Status UDPPokeStatus
Error error // if there was any error
Response []byte // if code != 0
}
// UDPPokeStatus is a string type representing the status of a UDP poke.
type UDPPokeStatus string
const (
// UDPSuccess is the UDP poke status reported on success.
UDPSuccess UDPPokeStatus = "Success"
UDPError UDPPokeStatus = "UnknownError"
// Any time we add new errors, we should audit all callers of this.
UDPTimeout UDPPokeStatus = "TimedOut"
UDPRefused UDPPokeStatus = "ConnectionRefused"
// UDPError is the UDP poke status for an unknown error.
UDPError UDPPokeStatus = "UnknownError"
// UDPTimeout is the UDP poke status for a timed-out request.
UDPTimeout UDPPokeStatus = "TimedOut"
// UDPRefused is the UDP poke status for a refused connection.
UDPRefused UDPPokeStatus = "ConnectionRefused"
// UDPBadResponse is the UDP poke status for an unexpected response payload.
UDPBadResponse UDPPokeStatus = "BadResponse"
// Any time we add new errors, we should audit all callers of this.
)
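A sketch of a UDP caller; the netexec-style "echo" request and expected reply are illustrative assumptions:
import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
)

func pokeUDPOnce(host string, port int) {
	params := &framework.UDPPokeParams{Timeout: 3 * time.Second, Response: "hello"}
	ret := framework.PokeUDP(host, port, "echo hello", params)
	switch ret.Status {
	case framework.UDPSuccess:
		// ret.Response holds the raw bytes read back.
	case framework.UDPTimeout, framework.UDPRefused:
		framework.Failf("transient UDP failure: %v", ret.Error)
	default:
		framework.Failf("UDP poke failed: %v", ret.Error)
	}
}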
// PokeUDP tries to connect to a host on a port and send the given request. Callers
@ -898,13 +931,13 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
params = &UDPPokeParams{}
}
Logf("Poking %v", url)
e2elog.Logf("Poking %v", url)
con, err := net.Dial("udp", hostPort)
if err != nil {
ret.Status = UDPError
ret.Error = err
Logf("Poke(%q): %v", url, err)
e2elog.Logf("Poke(%q): %v", url, err)
return ret
}
@ -919,7 +952,7 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
} else {
ret.Status = UDPError
}
Logf("Poke(%q): %v", url, err)
e2elog.Logf("Poke(%q): %v", url, err)
return ret
}
@ -928,7 +961,7 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
if err != nil {
ret.Status = UDPError
ret.Error = err
Logf("Poke(%q): %v", url, err)
e2elog.Logf("Poke(%q): %v", url, err)
return ret
}
}
@ -937,7 +970,7 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
if bufsize == 0 {
bufsize = 4096
}
var buf []byte = make([]byte, bufsize)
var buf = make([]byte, bufsize)
n, err := con.Read(buf)
if err != nil {
ret.Error = err
@ -949,7 +982,7 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
} else {
ret.Status = UDPError
}
Logf("Poke(%q): %v", url, err)
e2elog.Logf("Poke(%q): %v", url, err)
return ret
}
ret.Response = buf[0:n]
@ -957,22 +990,24 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
if params.Response != "" && string(ret.Response) != params.Response {
ret.Status = UDPBadResponse
ret.Error = fmt.Errorf("response does not match expected string: %q", string(ret.Response))
Logf("Poke(%q): %v", url, ret.Error)
e2elog.Logf("Poke(%q): %v", url, ret.Error)
return ret
}
ret.Status = UDPSuccess
Logf("Poke(%q): success", url)
e2elog.Logf("Poke(%q): success", url)
return ret
}
// TestHitNodesFromOutside checks HTTP connectivity from outside the cluster.
func TestHitNodesFromOutside(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String) error {
return TestHitNodesFromOutsideWithCount(externalIP, httpPort, timeout, expectedHosts, 1)
}
// TestHitNodesFromOutsideWithCount checks HTTP connectivity from outside the cluster, requiring the expected hosts to be hit countToSucceed times.
func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String,
countToSucceed int) error {
Logf("Waiting up to %v for satisfying expectedHosts for %v times", timeout, countToSucceed)
e2elog.Logf("Waiting up to %v for satisfying expectedHosts for %v times", timeout, countToSucceed)
hittedHosts := sets.NewString()
count := 0
condition := func() (bool, error) {
@ -983,13 +1018,13 @@ func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout
hittedHost := strings.TrimSpace(string(result.Body))
if !expectedHosts.Has(hittedHost) {
Logf("Error hitting unexpected host: %v, reset counter: %v", hittedHost, count)
e2elog.Logf("Error hitting unexpected host: %v, reset counter: %v", hittedHost, count)
count = 0
return false, nil
}
if !hittedHosts.Has(hittedHost) {
hittedHosts.Insert(hittedHost)
Logf("Missing %+v, got %+v", expectedHosts.Difference(hittedHosts), hittedHosts)
e2elog.Logf("Missing %+v, got %+v", expectedHosts.Difference(hittedHosts), hittedHosts)
}
if hittedHosts.Equal(expectedHosts) {
count++
@ -1007,7 +1042,7 @@ func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout
return nil
}
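A sketch of a caller; the hostnames, port, timeout, and success count are illustrative:
import (
	"time"

	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/kubernetes/test/e2e/framework"
)

func checkExternalLB(externalIP string) error {
	// Require both backends to be observed twice within five minutes;
	// hitting an unexpected host resets the counter.
	hosts := sets.NewString("node-a", "node-b")
	return framework.TestHitNodesFromOutsideWithCount(externalIP, 80, 5*time.Minute, hosts, 2)
}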
// Blocks outgoing network traffic on 'node'. Then runs testFunc and returns its status.
// TestUnderTemporaryNetworkFailure blocks outgoing network traffic on 'node'. Then runs testFunc and returns its status.
// At the end (even in case of errors), the network traffic is brought back to normal.
// This function executes commands on a node so it will work only for some
// environments.
@ -1017,19 +1052,19 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
Failf("Error getting node external ip : %v", err)
}
masterAddresses := GetAllMasterAddresses(c)
By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
ginkgo.By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
defer func() {
// This code will execute even if setting the iptables rule failed.
// It is on purpose because we may have an error even if the new rule
// had been inserted. (yes, we could look at the error code and ssh error
// separately, but I prefer to stay on the safe side).
By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name))
ginkgo.By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name))
for _, masterAddress := range masterAddresses {
UnblockNetwork(host, masterAddress)
}
}()
Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
e2elog.Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
if !WaitForNodeToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
@ -1037,7 +1072,7 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
BlockNetwork(host, masterAddress)
}
Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
e2elog.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
if !WaitForNodeToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) {
Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
}

View File

@ -25,29 +25,24 @@ import (
"sync"
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
func EtcdUpgrade(target_storage, target_version string) error {
// EtcdUpgrade upgrades etcd on GCE.
func EtcdUpgrade(targetStorage, targetVersion string) error {
switch TestContext.Provider {
case "gce":
return etcdUpgradeGCE(target_storage, target_version)
return etcdUpgradeGCE(targetStorage, targetVersion)
default:
return fmt.Errorf("EtcdUpgrade() is not implemented for provider %s", TestContext.Provider)
}
}
func IngressUpgrade(isUpgrade bool) error {
switch TestContext.Provider {
case "gce":
return ingressUpgradeGCE(isUpgrade)
default:
return fmt.Errorf("IngressUpgrade() is not implemented for provider %s", TestContext.Provider)
}
}
// MasterUpgrade upgrades master node on GCE/GKE.
func MasterUpgrade(v string) error {
switch TestContext.Provider {
case "gce":
@ -61,38 +56,18 @@ func MasterUpgrade(v string) error {
}
}
func etcdUpgradeGCE(target_storage, target_version string) error {
func etcdUpgradeGCE(targetStorage, targetVersion string) error {
env := append(
os.Environ(),
"TEST_ETCD_VERSION="+target_version,
"STORAGE_BACKEND="+target_storage,
"TEST_ETCD_IMAGE=3.3.10-0")
"TEST_ETCD_VERSION="+targetVersion,
"STORAGE_BACKEND="+targetStorage,
"TEST_ETCD_IMAGE=3.3.10-1")
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-l", "-M")
return err
}
func ingressUpgradeGCE(isUpgrade bool) error {
var command string
if isUpgrade {
// User specified image to upgrade to.
targetImage := TestContext.IngressUpgradeImage
if targetImage != "" {
command = fmt.Sprintf("sudo sed -i -re 's|(image:)(.*)|\\1 %s|' /etc/kubernetes/manifests/glbc.manifest", targetImage)
} else {
// Upgrade to latest HEAD image.
command = "sudo sed -i -re 's/(image:)(.*)/\\1 gcr.io\\/k8s-ingress-image-push\\/ingress-gce-e2e-glbc-amd64:master/' /etc/kubernetes/manifests/glbc.manifest"
}
} else {
// Downgrade to latest release image.
command = "sudo sed -i -re 's/(image:)(.*)/\\1 k8s.gcr.io\\/ingress-gce-glbc-amd64:v1.1.1/' /etc/kubernetes/manifests/glbc.manifest"
}
// Kubelet should restart glbc automatically.
sshResult, err := NodeExec(GetMasterHost(), command)
LogSSHResult(sshResult)
return err
}
// MasterUpgradeGCEWithKubeProxyDaemonSet upgrades master node on GCE with enabling/disabling the daemon set of kube-proxy.
// TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default.
func MasterUpgradeGCEWithKubeProxyDaemonSet(v string, enableKubeProxyDaemonSet bool) error {
return masterUpgradeGCE(v, enableKubeProxyDaemonSet)
@ -106,7 +81,7 @@ func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error {
env = append(env,
"TEST_ETCD_VERSION="+TestContext.EtcdUpgradeVersion,
"STORAGE_BACKEND="+TestContext.EtcdUpgradeStorage,
"TEST_ETCD_IMAGE=3.3.10-0")
"TEST_ETCD_IMAGE=3.3.10-1")
} else {
// In e2e tests, we skip the confirmation prompt about
// implicit etcd upgrades to simulate the user entering "y".
@ -135,7 +110,7 @@ func appendContainerCommandGroupIfNeeded(args []string) []string {
}
func masterUpgradeGKE(v string) error {
Logf("Upgrading master to %q", v)
e2elog.Logf("Upgrading master to %q", v)
args := []string{
"container",
"clusters",
@ -158,7 +133,7 @@ func masterUpgradeGKE(v string) error {
}
func masterUpgradeKubernetesAnywhere(v string) error {
Logf("Upgrading master to %q", v)
e2elog.Logf("Upgrading master to %q", v)
kaPath := TestContext.KubernetesAnywherePath
originalConfigPath := filepath.Join(kaPath, ".config")
@ -176,7 +151,7 @@ func masterUpgradeKubernetesAnywhere(v string) error {
defer func() {
// revert .config.bak to .config
if err := os.Rename(backupConfigPath, originalConfigPath); err != nil {
Logf("Could not rename %s back to %s", backupConfigPath, originalConfigPath)
e2elog.Logf("Could not rename %s back to %s", backupConfigPath, originalConfigPath)
}
}()
@ -194,6 +169,7 @@ func masterUpgradeKubernetesAnywhere(v string) error {
return nil
}
// NodeUpgrade upgrades nodes on GCE/GKE.
func NodeUpgrade(f *Framework, v string, img string) error {
// Perform the upgrade.
var err error
@ -211,6 +187,7 @@ func NodeUpgrade(f *Framework, v string, img string) error {
return waitForNodesReadyAfterUpgrade(f)
}
// NodeUpgradeGCEWithKubeProxyDaemonSet upgrades nodes on GCE with enabling/disabling the daemon set of kube-proxy.
// TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default.
func NodeUpgradeGCEWithKubeProxyDaemonSet(f *Framework, v string, img string, enableKubeProxyDaemonSet bool) error {
// Perform the upgrade.
@ -229,7 +206,7 @@ func waitForNodesReadyAfterUpgrade(f *Framework) error {
if err != nil {
return fmt.Errorf("couldn't detect number of nodes")
}
Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout, numNodes)
e2elog.Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout, numNodes)
if _, err := CheckNodesReady(f.ClientSet, numNodes, RestartNodeReadyAgainTimeout); err != nil {
return err
}
@ -250,7 +227,7 @@ func nodeUpgradeGCE(rawV, img string, enableKubeProxyDaemonSet bool) error {
}
func nodeUpgradeGKE(v string, img string) error {
Logf("Upgrading nodes to version %q and image %q", v, img)
e2elog.Logf("Upgrading nodes to version %q and image %q", v, img)
args := []string{
"container",
"clusters",
@ -301,7 +278,7 @@ func MigTemplate() (string, error) {
if val := ParseKVLines(output, key); len(val) > 0 {
url := strings.Split(val, "/")
templ = url[len(url)-1]
Logf("MIG group %s using template: %s", TestContext.CloudConfig.NodeInstanceGroup, templ)
e2elog.Logf("MIG group %s using template: %s", TestContext.CloudConfig.NodeInstanceGroup, templ)
return true, nil
}
errLast = fmt.Errorf("couldn't find %s in output to get MIG template. Output: %s", key, output)
@ -320,7 +297,7 @@ func gceUpgradeScript() string {
}
func waitForSSHTunnels() {
Logf("Waiting for SSH tunnels to establish")
e2elog.Logf("Waiting for SSH tunnels to establish")
RunKubectl("run", "ssh-tunnel-test",
"--image=busybox",
"--restart=Never",
@ -375,19 +352,19 @@ func (k *NodeKiller) kill(nodes []v1.Node) {
go func() {
defer wg.Done()
Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)
err := IssueSSHCommand("sudo systemctl stop docker kubelet", k.provider, &node)
e2elog.Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)
err := e2essh.IssueSSHCommand("sudo systemctl stop docker kubelet", k.provider, &node)
if err != nil {
Logf("ERROR while stopping node %q: %v", node.Name, err)
e2elog.Logf("ERROR while stopping node %q: %v", node.Name, err)
return
}
time.Sleep(k.config.SimulatedDowntime)
Logf("Rebooting %q to repair the node", node.Name)
err = IssueSSHCommand("sudo reboot", k.provider, &node)
e2elog.Logf("Rebooting %q to repair the node", node.Name)
err = e2essh.IssueSSHCommand("sudo reboot", k.provider, &node)
if err != nil {
Logf("ERROR while rebooting node %q: %v", node.Name, err)
e2elog.Logf("ERROR while rebooting node %q: %v", node.Name, err)
return
}
}()
@ -395,6 +372,7 @@ func (k *NodeKiller) kill(nodes []v1.Node) {
wg.Wait()
}
// DeleteNodeOnCloudProvider deletes the specified node.
func DeleteNodeOnCloudProvider(node *v1.Node) error {
return TestContext.CloudConfig.Provider.DeleteNode(node)
}

View File

@ -19,19 +19,20 @@ package framework
import (
"fmt"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/perftype"
)
// TODO(random-liu): Change the tests to actually use PerfData from the beginning instead of
// translating one to the other here.
// currentApiCallMetricsVersion is the current apicall performance metrics version. We should
// currentAPICallMetricsVersion is the current apicall performance metrics version. We should
// bump up the version each time we make incompatible change to the metrics.
const currentApiCallMetricsVersion = "v1"
const currentAPICallMetricsVersion = "v1"
// ApiCallToPerfData transforms APIResponsiveness to PerfData.
func ApiCallToPerfData(apicalls *APIResponsiveness) *perftype.PerfData {
perfData := &perftype.PerfData{Version: currentApiCallMetricsVersion}
// APICallToPerfData transforms APIResponsiveness to PerfData.
func APICallToPerfData(apicalls *APIResponsiveness) *perftype.PerfData {
perfData := &perftype.PerfData{Version: currentAPICallMetricsVersion}
for _, apicall := range apicalls.APICalls {
item := perftype.DataItem{
Data: map[string]float64{
@ -70,7 +71,7 @@ func latencyToPerfData(l LatencyMetric, name string) perftype.DataItem {
// PodStartupLatencyToPerfData transforms PodStartupLatency to PerfData.
func PodStartupLatencyToPerfData(latency *PodStartupLatency) *perftype.PerfData {
perfData := &perftype.PerfData{Version: currentApiCallMetricsVersion}
perfData := &perftype.PerfData{Version: currentAPICallMetricsVersion}
perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.CreateToScheduleLatency, "create_to_schedule"))
perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.ScheduleToRunLatency, "schedule_to_run"))
perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.RunToWatchLatency, "run_to_watch"))
@ -100,7 +101,7 @@ func CPUUsageToPerfData(usagePerNode NodesCPUSummary) *perftype.PerfData {
func PrintPerfData(p *perftype.PerfData) {
// Notice that we must make sure the perftype.PerfResultEnd is in a new line.
if str := PrettyPrintJSON(p); str != "" {
Logf("%s %s\n%s", perftype.PerfResultTag, str, perftype.PerfResultEnd)
e2elog.Logf("%s %s\n%s", perftype.PerfResultTag, str, perftype.PerfResultEnd)
}
}
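A sketch of how the two converters combine with PrintPerfData; apicalls and latency are assumed to have been gathered elsewhere by the suite:
import "k8s.io/kubernetes/test/e2e/framework"

func emitPerfData(apicalls *framework.APIResponsiveness, latency *framework.PodStartupLatency) {
	// PrintPerfData brackets the JSON between the PerfResultTag and
	// PerfResultEnd markers so external tooling can scrape the log.
	framework.PrintPerfData(framework.APICallToPerfData(apicalls))
	framework.PrintPerfData(framework.PodStartupLatencyToPerfData(latency))
}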

View File

@ -22,22 +22,24 @@ import (
"sync"
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/kubernetes/pkg/api/legacyscheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// DefaultPodDeletionTimeout is the default timeout for deleting a pod
const DefaultPodDeletionTimeout = 3 * time.Minute
// ImageWhiteList is the list of images used in the current test suite. It should be initialized in the test suite and
@ -45,7 +47,7 @@ const DefaultPodDeletionTimeout = 3 * time.Minute
// node e2e test.
var ImageWhiteList sets.String
// Convenience method for getting a pod client interface in the framework's namespace,
// PodClient is a convenience method for getting a pod client interface in the framework's namespace,
// possibly applying test-suite specific transformations to the pod spec, e.g. for
// node e2e pod scheduling.
func (f *Framework) PodClient() *PodClient {
@ -55,7 +57,7 @@ func (f *Framework) PodClient() *PodClient {
}
}
// Convenience method for getting a pod client interface in an alternative namespace,
// PodClientNS is a convenience method for getting a pod client interface in an alternative namespace,
// possibly applying test-suite specific transformations to the pod spec, e.g. for
// node e2e pod scheduling.
func (f *Framework) PodClientNS(namespace string) *PodClient {
@ -65,6 +67,7 @@ func (f *Framework) PodClientNS(namespace string) *PodClient {
}
}
// PodClient is a wrapper around v1core.PodInterface with framework-specific conveniences.
type PodClient struct {
f *Framework
v1core.PodInterface
@ -96,15 +99,15 @@ func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
func (c *PodClient) CreateEventually(pod *v1.Pod, opts ...interface{}) *v1.Pod {
c.mungeSpec(pod)
var ret *v1.Pod
Eventually(func() error {
gomega.Eventually(func() error {
p, err := c.PodInterface.Create(pod)
ret = p
return err
}, opts...).ShouldNot(HaveOccurred(), "Failed to create %q pod", pod.GetName())
}, opts...).ShouldNot(gomega.HaveOccurred(), "Failed to create %q pod", pod.GetName())
return ret
}
// CreateSync creates a new pod according to the framework specifications in the given namespace, and waits for it to start.
// CreateSyncInNamespace creates a new pod according to the framework specifications in the given namespace, and waits for it to start.
func (c *PodClient) CreateSyncInNamespace(pod *v1.Pod, namespace string) *v1.Pod {
p := c.Create(pod)
ExpectNoError(WaitForPodNameRunningInNamespace(c.f.ClientSet, p.Name, namespace))
@ -127,7 +130,7 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
wg.Add(1)
go func(i int, pod *v1.Pod) {
defer wg.Done()
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
ps[i] = c.CreateSync(pod)
}(i, pod)
}
@ -147,11 +150,11 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
updateFn(pod)
_, err = c.PodInterface.Update(pod)
if err == nil {
Logf("Successfully updated pod %q", name)
e2elog.Logf("Successfully updated pod %q", name)
return true, nil
}
if errors.IsConflict(err) {
Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
e2elog.Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
return false, nil
}
return false, fmt.Errorf("failed to update pod %q: %v", name, err)
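The Update helper above retries on conflicts; a sketch of a caller, assuming a suite-provided *Framework f, with the pod name and label as placeholders:
import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

func labelPod(f *framework.Framework, podName string) {
	f.PodClient().Update(podName, func(pod *v1.Pod) {
		// The callback always sees a freshly fetched object; on a
		// conflict, Update re-gets the pod and retries the callback.
		if pod.Labels == nil {
			pod.Labels = map[string]string{}
		}
		pod.Labels["phase"] = "updated"
	})
}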
@ -171,8 +174,8 @@ func (c *PodClient) DeleteSyncInNamespace(name string, namespace string, options
if err != nil && !errors.IsNotFound(err) {
Failf("Failed to delete pod %q: %v", name, err)
}
Expect(WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
2*time.Second, timeout)).To(Succeed(), "wait for pod %q to disappear", name)
gomega.Expect(WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
2*time.Second, timeout)).To(gomega.Succeed(), "wait for pod %q to disappear", name)
}
// mungeSpec applies test-suite specific transformations to the pod spec.
@ -181,7 +184,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
return
}
Expect(pod.Spec.NodeName).To(Or(BeZero(), Equal(TestContext.NodeName)), "Test misconfigured")
gomega.Expect(pod.Spec.NodeName).To(gomega.Or(gomega.BeZero(), gomega.Equal(TestContext.NodeName)), "Test misconfigured")
pod.Spec.NodeName = TestContext.NodeName
// Node e2e does not support the default DNSClusterFirst policy. Set
// the policy to DNSDefault, which is configured per node.
@ -204,18 +207,18 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
}
// If the image policy is not PullAlways, the image must be in the white list and
// pre-pulled.
Expect(ImageWhiteList.Has(c.Image)).To(BeTrue(), "Image %q is not in the white list, consider adding it to CommonImageWhiteList in test/e2e/common/util.go or NodeImageWhiteList in test/e2e_node/image_list.go", c.Image)
gomega.Expect(ImageWhiteList.Has(c.Image)).To(gomega.BeTrue(), "Image %q is not in the white list, consider adding it to CommonImageWhiteList in test/e2e/common/util.go or NodeImageWhiteList in test/e2e_node/image_list.go", c.Image)
// Do not pull images during the tests because the images in white list should have
// been prepulled.
c.ImagePullPolicy = v1.PullNever
}
}
// TODO(random-liu): Move pod wait function into this file
// WaitForSuccess waits for pod to succeed.
// TODO(random-liu): Move pod wait function into this file
func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
f := c.f
Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:
@ -226,13 +229,13 @@ func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
return false, nil
}
},
)).To(Succeed(), "wait for pod %q to success", name)
)).To(gomega.Succeed(), "wait for pod %q to success", name)
}
// WaitForFailure waits for pod to fail.
func (c *PodClient) WaitForFailure(name string, timeout time.Duration) {
f := c.f
Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:
@ -243,14 +246,31 @@ func (c *PodClient) WaitForFailure(name string, timeout time.Duration) {
return false, nil
}
},
)).To(Succeed(), "wait for pod %q to fail", name)
)).To(gomega.Succeed(), "wait for pod %q to fail", name)
}
// WaitForSuccess waits for pod to succeed or an error event for that pod.
// WaitForFinish waits for pod to finish running, regardless of success or failure.
func (c *PodClient) WaitForFinish(name string, timeout time.Duration) {
f := c.f
gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:
return true, nil
case v1.PodSucceeded:
return true, nil
default:
return false, nil
}
},
)).To(gomega.Succeed(), "wait for pod %q to finish running", name)
}
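A sketch tying the helpers together, assuming CreateSync (shown in use earlier in this file) accepts and returns a *v1.Pod:
import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

func runToCompletion(f *framework.Framework, pod *v1.Pod) {
	client := f.PodClient()
	created := client.CreateSync(pod)
	// WaitForFinish returns on PodSucceeded or PodFailed alike;
	// WaitForSuccess would additionally fail the test on PodFailed.
	client.WaitForFinish(created.Name, 3*time.Minute)
}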
// WaitForErrorEventOrSuccess waits for the pod to succeed or for an error event for that pod.
func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
var ev *v1.Event
err := wait.Poll(Poll, PodStartTimeout, func() (bool, error) {
evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(legacyscheme.Scheme, pod)
evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
}
@ -287,6 +307,7 @@ func (c *PodClient) MatchContainerOutput(name string, containerName string, expe
return nil
}
// PodIsReady returns true if the specified pod is ready. Otherwise false.
func (c *PodClient) PodIsReady(name string) bool {
pod, err := c.Get(name, metav1.GetOptions{})
ExpectNoError(err)

View File

@ -25,10 +25,13 @@ import (
"strings"
"sync"
"time"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
const (
// Default value for how long the CPU profile is gathered for.
// DefaultCPUProfileSeconds is the default number of seconds a CPU profile is gathered for.
DefaultCPUProfileSeconds = 30
)
@ -93,7 +96,7 @@ func gatherProfile(componentName, profileBaseName, profileKind string) error {
// Get the profile data over SSH.
getCommand := fmt.Sprintf("curl -s localhost:%v/debug/pprof/%s", profilePort, profileKind)
sshResult, err := SSH(getCommand, GetMasterHost()+":22", TestContext.Provider)
sshResult, err := e2essh.SSH(getCommand, GetMasterHost()+":22", TestContext.Provider)
if err != nil {
return fmt.Errorf("Failed to execute curl command on master through SSH: %v", err)
}
@ -168,25 +171,28 @@ func gatherProfile(componentName, profileBaseName, profileKind string) error {
// that the function finishes. There's also a polling-based gatherer utility for
// CPU profiles available below.
// GatherCPUProfile gathers CPU profile.
func GatherCPUProfile(componentName string, profileBaseName string, wg *sync.WaitGroup) {
GatherCPUProfileForSeconds(componentName, profileBaseName, DefaultCPUProfileSeconds, wg)
}
// GatherCPUProfileForSeconds gathers CPU profile for specified seconds.
func GatherCPUProfileForSeconds(componentName string, profileBaseName string, seconds int, wg *sync.WaitGroup) {
if wg != nil {
defer wg.Done()
}
if err := gatherProfile(componentName, profileBaseName, fmt.Sprintf("profile?seconds=%v", seconds)); err != nil {
Logf("Failed to gather %v CPU profile: %v", componentName, err)
e2elog.Logf("Failed to gather %v CPU profile: %v", componentName, err)
}
}
// GatherMemoryProfile gathers memory profile.
func GatherMemoryProfile(componentName string, profileBaseName string, wg *sync.WaitGroup) {
if wg != nil {
defer wg.Done()
}
if err := gatherProfile(componentName, profileBaseName, "heap"); err != nil {
Logf("Failed to gather %v memory profile: %v", componentName, err)
e2elog.Logf("Failed to gather %v memory profile: %v", componentName, err)
}
}
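A sketch of gathering both profile kinds concurrently; the component name and base name are placeholders:
import (
	"sync"

	"k8s.io/kubernetes/test/e2e/framework"
)

func gatherApiserverProfiles() {
	// Both helpers call wg.Done() themselves when handed a non-nil
	// WaitGroup, so one Add(2) covers the pair.
	var wg sync.WaitGroup
	wg.Add(2)
	go framework.GatherCPUProfile("kube-apiserver", "after-load", &wg)
	go framework.GatherMemoryProfile("kube-apiserver", "after-load", &wg)
	wg.Wait()
}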

View File

@ -27,6 +27,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
)
// Factory is a func that constructs a provider-specific ProviderInterface.
type Factory func() (ProviderInterface, error)
var (
@ -112,45 +113,67 @@ type ProviderInterface interface {
// which doesn't do anything.
type NullProvider struct{}
// FrameworkBeforeEach is a base implementation which does BeforeEach.
func (n NullProvider) FrameworkBeforeEach(f *Framework) {}
func (n NullProvider) FrameworkAfterEach(f *Framework) {}
// FrameworkAfterEach is a base implementation which does AfterEach.
func (n NullProvider) FrameworkAfterEach(f *Framework) {}
// ResizeGroup is a base implementation which resizes a group.
func (n NullProvider) ResizeGroup(string, int32) error {
return fmt.Errorf("Provider does not support InstanceGroups")
}
// GetGroupNodes is a base implementation which returns group nodes.
func (n NullProvider) GetGroupNodes(group string) ([]string, error) {
return nil, fmt.Errorf("provider does not support InstanceGroups")
}
// GroupSize is a base implementation which returns the size of an instance group.
func (n NullProvider) GroupSize(group string) (int, error) {
return -1, fmt.Errorf("provider does not support InstanceGroups")
}
// DeleteNode is a base implementation which deletes a node.
func (n NullProvider) DeleteNode(node *v1.Node) error {
return fmt.Errorf("provider does not support DeleteNode")
}
// CreatePD is a base implementation which creates a PD.
func (n NullProvider) CreatePD(zone string) (string, error) {
return "", fmt.Errorf("provider does not support volume creation")
}
// DeletePD is a base implementation which deletes a PD.
func (n NullProvider) DeletePD(pdName string) error {
return fmt.Errorf("provider does not support volume deletion")
}
// CreatePVSource is a base implementation which creates a PV source.
func (n NullProvider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) {
return nil, fmt.Errorf("Provider not supported")
}
// DeletePVSource is a base implementation which deletes a PV source.
func (n NullProvider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
return fmt.Errorf("Provider not supported")
}
// CleanupServiceResources is a base implementation which cleans up service resources.
func (n NullProvider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) {
}
// EnsureLoadBalancerResourcesDeleted is a base implementation which ensures the load balancer is deleted.
func (n NullProvider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
return nil
}
// LoadBalancerSrcRanges is a base implementation which returns the IP ranges used by load balancers.
func (n NullProvider) LoadBalancerSrcRanges() []string {
return nil
}
// EnableAndDisableInternalLB is a base implementation which returns functions for enabling/disabling an internal LB.
func (n NullProvider) EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service)) {
nop := func(svc *v1.Service) {}
return nop, nop

View File

@ -28,8 +28,10 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
"k8s.io/kubernetes/test/e2e/framework/auth"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
const (
@ -41,8 +43,8 @@ var (
isPSPEnabled bool
)
// Creates a PodSecurityPolicy that allows everything.
func PrivilegedPSP(name string) *policy.PodSecurityPolicy {
// privilegedPSP creates a PodSecurityPolicy that allows everything.
func privilegedPSP(name string) *policy.PodSecurityPolicy {
allowPrivilegeEscalation := true
return &policy.PodSecurityPolicy{
ObjectMeta: metav1.ObjectMeta{
@ -76,17 +78,18 @@ func PrivilegedPSP(name string) *policy.PodSecurityPolicy {
}
}
// IsPodSecurityPolicyEnabled returns true if PodSecurityPolicy is enabled. Otherwise false.
func IsPodSecurityPolicyEnabled(f *Framework) bool {
isPSPEnabledOnce.Do(func() {
psps, err := f.ClientSet.PolicyV1beta1().PodSecurityPolicies().List(metav1.ListOptions{})
if err != nil {
Logf("Error listing PodSecurityPolicies; assuming PodSecurityPolicy is disabled: %v", err)
e2elog.Logf("Error listing PodSecurityPolicies; assuming PodSecurityPolicy is disabled: %v", err)
isPSPEnabled = false
} else if psps == nil || len(psps.Items) == 0 {
Logf("No PodSecurityPolicies found; assuming PodSecurityPolicy is disabled.")
e2elog.Logf("No PodSecurityPolicies found; assuming PodSecurityPolicy is disabled.")
isPSPEnabled = false
} else {
Logf("Found PodSecurityPolicies; assuming PodSecurityPolicy is enabled.")
e2elog.Logf("Found PodSecurityPolicies; assuming PodSecurityPolicy is enabled.")
isPSPEnabled = true
}
})
@ -97,7 +100,7 @@ var (
privilegedPSPOnce sync.Once
)
func CreatePrivilegedPSPBinding(f *Framework, namespace string) {
func createPrivilegedPSPBinding(f *Framework, namespace string) {
if !IsPodSecurityPolicyEnabled(f) {
return
}
@ -111,13 +114,13 @@ func CreatePrivilegedPSPBinding(f *Framework, namespace string) {
return
}
psp := PrivilegedPSP(podSecurityPolicyPrivileged)
psp := privilegedPSP(podSecurityPolicyPrivileged)
psp, err = f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Create(psp)
if !apierrs.IsAlreadyExists(err) {
ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
}
if IsRBACEnabled(f) {
if auth.IsRBACEnabled(f.ClientSet.RbacV1beta1()) {
// Create the Role to bind it to the namespace.
_, err = f.ClientSet.RbacV1beta1().ClusterRoles().Create(&rbacv1beta1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged},
@ -134,10 +137,10 @@ func CreatePrivilegedPSPBinding(f *Framework, namespace string) {
}
})
if IsRBACEnabled(f) {
By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s",
if auth.IsRBACEnabled(f.ClientSet.RbacV1beta1()) {
ginkgo.By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s",
podSecurityPolicyPrivileged, namespace))
BindClusterRoleInNamespace(f.ClientSet.RbacV1beta1(),
err := auth.BindClusterRoleInNamespace(f.ClientSet.RbacV1beta1(),
podSecurityPolicyPrivileged,
namespace,
rbacv1beta1.Subject{
@ -145,7 +148,8 @@ func CreatePrivilegedPSPBinding(f *Framework, namespace string) {
Namespace: namespace,
Name: "default",
})
ExpectNoError(WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(),
ExpectNoError(err)
ExpectNoError(auth.WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(),
serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged,
schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true))
}

View File

@ -20,8 +20,8 @@ import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -29,36 +29,42 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
"k8s.io/kubernetes/pkg/volume/util"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
PDRetryTimeout = 5 * time.Minute
PDRetryPollTime = 5 * time.Second
pdRetryTimeout = 5 * time.Minute
pdRetryPollTime = 5 * time.Second
// VolumeSelectorKey is the key for volume selector.
VolumeSelectorKey = "e2e-pv-pool"
)
var (
// Common selinux labels
// SELinuxLabel is a common SELinux label used by the tests.
SELinuxLabel = &v1.SELinuxOptions{
Level: "s0:c0,c1"}
)
// Map of all PVs used in the multi pv-pvc tests. The key is the PV's name, which is
type pvval struct{}
// PVMap is a map of all PVs used in the multi pv-pvc tests. The key is the PV's name, which is
// guaranteed to be unique. The value is {} (empty struct) since we're only interested
// in the PV's name and if it is present. We must always Get the pv object before
// referencing any of its values, eg its ClaimRef.
type pvval struct{}
type PVMap map[string]pvval
// Map of all PVCs used in the multi pv-pvc tests. The key is "namespace/pvc.Name". The
type pvcval struct{}
// PVCMap is a map of all PVCs used in the multi pv-pvc tests. The key is "namespace/pvc.Name". The
// value is {} (empty struct) since we're only interested in the PVC's name and if it is
// present. We must always Get the pvc object before referencing any of its values, eg.
// its VolumeName.
// Note: It's unsafe to add keys to a map while iterating over it. Insertion order is
// unpredictable, and a newly added key may itself be iterated over again.
type pvcval struct{}
type PVCMap map[types.NamespacedName]pvcval
// PersistentVolumeConfig is consumed by MakePersistentVolume() to generate a PV object
@ -84,7 +90,6 @@ type PersistentVolumeConfig struct {
// PersistentVolumeClaimConfig is consumed by MakePersistentVolumeClaim() to generate a PVC object.
// AccessModes defaults to all modes (RWO, RWX, ROX) if left empty
// (+optional) Annotations defines the PVC's annotations
type PersistentVolumeClaimConfig struct {
AccessModes []v1.PersistentVolumeAccessMode
Annotations map[string]string
@ -93,7 +98,15 @@ type PersistentVolumeClaimConfig struct {
VolumeMode *v1.PersistentVolumeMode
}
// Clean up a pv and pvc in a single pv/pvc test case.
// NodeSelection specifies where to run a pod, using a combination of fixed node name,
// node selector and/or affinity.
type NodeSelection struct {
Name string
Selector map[string]string
Affinity *v1.Affinity
}
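// Illustrative sketch, not part of this diff: a NodeSelection that pins a pod
// to a named node and additionally requires a node label. Both values are
// placeholders.
var exampleNodeSelection = NodeSelection{
	Name:     "node-1",
	Selector: map[string]string{"disktype": "ssd"},
}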
// PVPVCCleanup cleans up a pv and pvc in a single pv/pvc test case.
// Note: delete errors are appended to []error so that we can attempt to delete both the pvc and pv.
func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) []error {
var errs []error
@ -104,7 +117,7 @@ func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvc.Name, err))
}
} else {
Logf("pvc is nil")
e2elog.Logf("pvc is nil")
}
if pv != nil {
err := DeletePersistentVolume(c, pv.Name)
@ -112,12 +125,12 @@ func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc
errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pv.Name, err))
}
} else {
Logf("pv is nil")
e2elog.Logf("pv is nil")
}
return errs
}
// Clean up pvs and pvcs in multi-pv-pvc test cases. Entries found in the pv and claim maps are
// PVPVCMapCleanup cleans up pvs and pvcs in multi-pv-pvc test cases. Entries found in the pv and claim maps are
// deleted as long as the Delete api call succeeds.
// Note: delete errors are appended to []error so that as many pvcs and pvs as possible are deleted.
func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMap) []error {
@ -143,10 +156,10 @@ func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMa
return errs
}
// Delete the PV.
// DeletePersistentVolume deletes the PV.
func DeletePersistentVolume(c clientset.Interface, pvName string) error {
if c != nil && len(pvName) > 0 {
Logf("Deleting PersistentVolume %q", pvName)
e2elog.Logf("Deleting PersistentVolume %q", pvName)
err := c.CoreV1().PersistentVolumes().Delete(pvName, nil)
if err != nil && !apierrs.IsNotFound(err) {
return fmt.Errorf("PV Delete API error: %v", err)
@ -155,10 +168,10 @@ func DeletePersistentVolume(c clientset.Interface, pvName string) error {
return nil
}
// Delete the Claim
// DeletePersistentVolumeClaim deletes the Claim.
func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) error {
if c != nil && len(pvcName) > 0 {
Logf("Deleting PersistentVolumeClaim %q", pvcName)
e2elog.Logf("Deleting PersistentVolumeClaim %q", pvcName)
err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvcName, nil)
if err != nil && !apierrs.IsNotFound(err) {
return fmt.Errorf("PVC Delete API error: %v", err)
@ -167,19 +180,19 @@ func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns strin
return nil
}
// Delete the PVC and wait for the PV to enter its expected phase. Validate that the PV
// DeletePVCandValidatePV deletes the PVC and waits for the PV to enter its expected phase. It validates that the PV
// has been reclaimed (assumption here about reclaimPolicy). Caller tells this func which
// phase value to expect for the pv bound to the to-be-deleted claim.
func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
pvname := pvc.Spec.VolumeName
Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
e2elog.Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
err := DeletePersistentVolumeClaim(c, pvc.Name, ns)
if err != nil {
return err
}
// Wait for the PV's phase to return to be `expectPVPhase`
Logf("Waiting for reclaim process to complete.")
e2elog.Logf("Waiting for reclaim process to complete.")
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, Poll, PVReclaimingTimeout)
if err != nil {
return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
@ -204,11 +217,11 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
}
}
Logf("PV %v now in %q phase", pv.Name, expectPVPhase)
e2elog.Logf("PV %v now in %q phase", pv.Name, expectPVPhase)
return nil
}
// Wraps deletePVCandValidatePV() by calling the function in a loop over the PV map. Only bound PVs
// DeletePVCandValidatePVGroup wraps deletePVCandValidatePV() by calling the function in a loop over the PV map. Only bound PVs
// are deleted. Validates that the claim was deleted and the PV is in the expected Phase (Released,
// Available, Bound).
// Note: if there are more claims than pvs then some of the remaining claims may bind to just made
@ -261,12 +274,12 @@ func createPV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVol
return pv, nil
}
// create the PV resource. Fails test on error.
// CreatePV creates the PV resource. Fails test on error.
func CreatePV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
return createPV(c, pv)
}
// create the PVC resource. Fails test on error.
// CreatePVC creates the PVC resource. Fails test on error.
func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
if err != nil {
@ -275,7 +288,7 @@ func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim)
return pvc, nil
}
// Create a PVC followed by the PV based on the passed in nfs-server ip and
// CreatePVCPV creates a PVC followed by the PV based on the passed in nfs-server ip and
// namespace. If the "preBind" bool is true then pre-bind the PV to the PVC
// via the PV's ClaimRef. Return the pv and pvc to reflect the created objects.
// Note: in the pre-bind case the real PVC name, which is generated, is not
@ -292,7 +305,7 @@ func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
// make the pv spec
pv := MakePersistentVolume(pvConfig)
By(fmt.Sprintf("Creating a PVC followed by a%s PV", preBindMsg))
ginkgo.By(fmt.Sprintf("Creating a PVC followed by a%s PV", preBindMsg))
pvc, err := CreatePVC(c, ns, pvc)
if err != nil {
return nil, nil, err
@ -309,7 +322,7 @@ func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
return pv, pvc, nil
}
// Create a PV followed by the PVC based on the passed in nfs-server ip and
// CreatePVPVC creates a PV followed by the PVC based on the passed in nfs-server ip and
// namespace. If the "preBind" bool is true then pre-bind the PVC to the PV
// via the PVC's VolumeName. Return the pv and pvc to reflect the created
// objects.
@ -321,7 +334,7 @@ func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
if preBind {
preBindMsg = " pre-bound"
}
Logf("Creating a PV followed by a%s PVC", preBindMsg)
e2elog.Logf("Creating a PV followed by a%s PVC", preBindMsg)
// make the pv and pvc definitions
pv := MakePersistentVolume(pvConfig)
@ -343,7 +356,7 @@ func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
return pv, pvc, nil
}
// Create the desired number of PVs and PVCs and return them in separate maps. If the
// CreatePVsPVCs creates the desired number of PVs and PVCs and returns them in separate maps. If the
// number of PVs != the number of PVCs then the min of those two counts is the number of
// PVs expected to bind. If a Create error occurs, the returned maps may contain pv and pvc
// entries for the resources that were successfully created. In other words, when the caller
@ -391,10 +404,10 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
return pvMap, pvcMap, nil
}
// Wait for the pv and pvc to bind to each other.
// WaitOnPVandPVC waits for the pv and pvc to bind to each other.
func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
// Wait for newly created PVC to bind to the PV
Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
e2elog.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, Poll, ClaimBindingTimeout)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
@ -434,7 +447,7 @@ func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, p
return nil
}
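// Illustrative sketch, not part of this diff: the usual create-then-bind flow
// with the helpers above. pvConfig and pvcConfig are assumed to be prepared by
// the caller, and the parameter order (c, pvConfig, pvcConfig, ns, preBind) is
// assumed from the hunk headers above; preBind is false, so binding is left to
// the controller.
func exampleCreateAndBind(c clientset.Interface, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) error {
	pv, pvc, err := CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
	if err != nil {
		return err
	}
	// Blocks until the PVC is Bound and the PV's ClaimRef points back at it.
	return WaitOnPVandPVC(c, ns, pv, pvc)
}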
// Search for bound PVs and PVCs by examining pvols for non-nil claimRefs.
// WaitAndVerifyBinds searches for bound PVs and PVCs by examining pvols for non-nil claimRefs.
// NOTE: Each iteration waits for a maximum of 3 minutes per PV and, if the PV is bound,
// up to 3 minutes for the PVC. When the number of PVs != number of PVCs, this can lead
// to situations where the maximum wait times are reached several times in succession,
@ -450,8 +463,8 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
for pvName := range pvols {
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, Poll, PVBindingTimeout)
if err != nil && len(pvols) > len(claims) {
Logf("WARN: pv %v is not bound after max wait", pvName)
Logf(" This may be ok since there are more pvs than pvcs")
e2elog.Logf("WARN: pv %v is not bound after max wait", pvName)
e2elog.Logf(" This may be ok since there are more pvs than pvcs")
continue
}
if err != nil {
@ -487,15 +500,15 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
// testPodSuccessOrFail tests that the pod's exit code is zero.
func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
By("Pod should terminate with exitcode 0 (success)")
ginkgo.By("Pod should terminate with exitcode 0 (success)")
if err := WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err)
}
Logf("Pod %v succeeded ", pod.Name)
e2elog.Logf("Pod %v succeeded ", pod.Name)
return nil
}
// Deletes the passed-in pod and waits for the pod to be terminated. Resilient to the pod
// DeletePodWithWait deletes the passed-in pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) error {
if pod == nil {
@ -504,11 +517,10 @@ func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) error {
return DeletePodWithWaitByName(f, c, pod.GetName(), pod.GetNamespace())
}
// Deletes the named and namespaced pod and waits for the pod to be terminated. Resilient to the pod
// DeletePodWithWaitByName deletes the named and namespaced pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func DeletePodWithWaitByName(f *Framework, c clientset.Interface, podName, podNamespace string) error {
const maxWait = 5 * time.Minute
Logf("Deleting pod %q in namespace %q", podName, podNamespace)
e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
err := c.CoreV1().Pods(podNamespace).Delete(podName, nil)
if err != nil {
if apierrs.IsNotFound(err) {
@ -516,19 +528,19 @@ func DeletePodWithWaitByName(f *Framework, c clientset.Interface, podName, podNa
}
return fmt.Errorf("pod Delete API error: %v", err)
}
Logf("Wait up to %v for pod %q to be fully deleted", maxWait, podName)
err = f.WaitForPodNotFound(podName, maxWait)
e2elog.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName)
err = f.WaitForPodNotFound(podName, PodDeleteTimeout)
if err != nil {
return fmt.Errorf("pod %q was not deleted: %v", podName, err)
}
return nil
}
// Create the test pod, wait for (hopefully) success, and then delete the pod.
// CreateWaitAndDeletePod creates the test pod, waits for (hopefully) success, and then deletes the pod.
// Note: need named return value so that the err assignment in the defer sets the returned error.
// Has been shown to be necessary using Go 1.7.
func CreateWaitAndDeletePod(f *Framework, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (err error) {
Logf("Creating nfs test pod")
e2elog.Logf("Creating nfs test pod")
pod := MakeWritePod(ns, pvc)
runPod, err := c.CoreV1().Pods(ns).Create(pod)
if err != nil {
@ -553,7 +565,7 @@ func makePvcKey(ns, name string) types.NamespacedName {
return types.NamespacedName{Namespace: ns, Name: name}
}
// Returns a PV definition based on the nfs server IP. If the PVC is not nil
// MakePersistentVolume returns a PV definition based on the nfs server IP. If the PVC is not nil
// then the PV is defined with a ClaimRef which includes the PVC's namespace.
// If the PVC is nil then the PV is not defined with a ClaimRef. If no reclaimPolicy
// is assigned, assumes "Retain". Specs are expected to match the test's PVC.
@ -564,7 +576,7 @@ func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume
var claimRef *v1.ObjectReference
// If the reclaimPolicy is not provided, assume Retain
if pvConfig.ReclaimPolicy == "" {
Logf("PV ReclaimPolicy unspecified, default: Retain")
e2elog.Logf("PV ReclaimPolicy unspecified, default: Retain")
pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRetain
}
if pvConfig.Prebind != nil {
@ -600,7 +612,7 @@ func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume
}
}
// Returns a PVC definition based on the namespace.
// MakePersistentVolumeClaim returns a PVC definition based on the namespace.
// Note: if this PVC is intended to be pre-bound to a PV, whose name is not
// known until the PV is instantiated, then the func CreatePVPVC will add
// pvc.Spec.VolumeName to this claim.
@ -608,7 +620,7 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
// Specs are expected to match this test's PersistentVolume
if len(cfg.AccessModes) == 0 {
Logf("AccessModes unspecified, default: all modes (RWO, RWX, ROX).")
e2elog.Logf("AccessModes unspecified, default: all modes (RWO, RWX, ROX).")
cfg.AccessModes = append(cfg.AccessModes, v1.ReadWriteOnce, v1.ReadWriteMany, v1.ReadOnlyMany)
}
@ -634,35 +646,38 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
func createPDWithRetry(zone string) (string, error) {
var err error
for start := time.Now(); time.Since(start) < PDRetryTimeout; time.Sleep(PDRetryPollTime) {
for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) {
newDiskName, err := createPD(zone)
if err != nil {
Logf("Couldn't create a new PD, sleeping 5 seconds: %v", err)
e2elog.Logf("Couldn't create a new PD, sleeping 5 seconds: %v", err)
continue
}
Logf("Successfully created a new PD: %q.", newDiskName)
e2elog.Logf("Successfully created a new PD: %q.", newDiskName)
return newDiskName, nil
}
return "", err
}
// CreatePDWithRetry creates PD with retry.
func CreatePDWithRetry() (string, error) {
return createPDWithRetry("")
}
// CreatePDWithRetryAndZone creates PD on zone with retry.
func CreatePDWithRetryAndZone(zone string) (string, error) {
return createPDWithRetry(zone)
}
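// Illustrative sketch, not part of this diff: the typical create/use/delete
// lifecycle for a PD, deferring the retried delete so the disk is cleaned up
// even if the test fails part-way through.
func exampleProvisionPD() error {
	diskName, err := CreatePDWithRetry()
	if err != nil {
		return err
	}
	defer func() {
		if derr := DeletePDWithRetry(diskName); derr != nil {
			e2elog.Logf("failed to clean up PD %q: %v", diskName, derr)
		}
	}()
	// ... exercise the disk here ...
	return nil
}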
// DeletePDWithRetry deletes PD with retry.
func DeletePDWithRetry(diskName string) error {
var err error
for start := time.Now(); time.Since(start) < PDRetryTimeout; time.Sleep(PDRetryPollTime) {
for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) {
err = deletePD(diskName)
if err != nil {
Logf("Couldn't delete PD %q, sleeping %v: %v", diskName, PDRetryPollTime, err)
e2elog.Logf("Couldn't delete PD %q, sleeping %v: %v", diskName, pdRetryPollTime, err)
continue
}
Logf("Successfully deleted PD %q.", diskName)
e2elog.Logf("Successfully deleted PD %q.", diskName)
return nil
}
return fmt.Errorf("unable to delete PD %q: %v", diskName, err)
@ -679,13 +694,13 @@ func deletePD(pdName string) error {
return TestContext.CloudConfig.Provider.DeletePD(pdName)
}
// Returns a pod definition based on the namespace. The pod references the PVC's
// MakeWritePod returns a pod definition based on the namespace. The pod references the PVC's
// name.
func MakeWritePod(ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
return MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
}
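// Illustrative sketch, not part of this diff: building and creating a pod that
// writes a marker file to the first claim's volume. The command string is a
// placeholder.
func exampleWriterPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
	pod := MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/example")
	return c.CoreV1().Pods(ns).Create(pod)
}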
// Returns a pod definition based on the namespace. The pod references the PVC's
// MakePod returns a pod definition based on the namespace. The pod references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod
func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod {
if len(command) == 0 {
@ -730,8 +745,8 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten
return podSpec
}
// Returns a pod definition based on the namespace using nginx image
func MakeNginxPod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) *v1.Pod {
// makeNginxPod returns a pod definition based on the namespace using nginx image
func makeNginxPod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) *v1.Pod {
podSpec := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@ -771,7 +786,7 @@ func MakeNginxPod(ns string, nodeSelector map[string]string, pvclaims []*v1.Pers
return podSpec
}
// Returns a pod definition based on the namespace. The pod references the PVC's
// MakeSecPod returns a pod definition based on the namespace. The pod references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod.
// SELinux testing requires passing HostIPC and HostPID as boolean arguments.
func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64) *v1.Pod {
@ -853,8 +868,9 @@ func CreatePod(client clientset.Interface, namespace string, nodeSelector map[st
return pod, nil
}
// CreateNginxPod creates an nginx pod.
func CreateNginxPod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) (*v1.Pod, error) {
pod := MakeNginxPod(namespace, nodeSelector, pvclaims)
pod := makeNginxPod(namespace, nodeSelector, pvclaims)
pod, err := client.CoreV1().Pods(namespace).Create(pod)
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
@ -872,16 +888,18 @@ func CreateNginxPod(client clientset.Interface, namespace string, nodeSelector m
return pod, nil
}
// create security pod with given claims
// CreateSecPod creates a security pod with the given claims
func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, timeout time.Duration) (*v1.Pod, error) {
return CreateSecPodWithNodeName(client, namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup, "", timeout)
return CreateSecPodWithNodeSelection(client, namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup, NodeSelection{}, timeout)
}
// create security pod with given claims
func CreateSecPodWithNodeName(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, nodeName string, timeout time.Duration) (*v1.Pod, error) {
// CreateSecPodWithNodeSelection creates a security pod with the given claims, scheduled according to the given NodeSelection
func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, node NodeSelection, timeout time.Duration) (*v1.Pod, error) {
pod := MakeSecPod(namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
// Setting nodeName
pod.Spec.NodeName = nodeName
// Setting node
pod.Spec.NodeName = node.Name
pod.Spec.NodeSelector = node.Selector
pod.Spec.Affinity = node.Affinity
pod, err := client.CoreV1().Pods(namespace).Create(pod)
if err != nil {
@ -901,7 +919,37 @@ func CreateSecPodWithNodeName(client clientset.Interface, namespace string, pvcl
return pod, nil
}
// Define and create a pod with a mounted PV. Pod runs infinite loop until killed.
// SetNodeAffinityRequirement adds a node affinity requirement to nodeSelection, matching nodeName with the specified operator
func SetNodeAffinityRequirement(nodeSelection *NodeSelection, operator v1.NodeSelectorOperator, nodeName string) {
// Add a node affinity requirement; the operator determines whether it selects or avoids nodeName.
if nodeSelection.Affinity == nil {
nodeSelection.Affinity = &v1.Affinity{}
}
if nodeSelection.Affinity.NodeAffinity == nil {
nodeSelection.Affinity.NodeAffinity = &v1.NodeAffinity{}
}
if nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{}
}
nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
v1.NodeSelectorTerm{
MatchFields: []v1.NodeSelectorRequirement{
{Key: "metadata.name", Operator: operator, Values: []string{nodeName}},
},
})
}
// SetAffinity adds a node affinity for nodeName to nodeSelection
func SetAffinity(nodeSelection *NodeSelection, nodeName string) {
SetNodeAffinityRequirement(nodeSelection, v1.NodeSelectorOpIn, nodeName)
}
// SetAntiAffinity adds a node anti-affinity for nodeName to nodeSelection
func SetAntiAffinity(nodeSelection *NodeSelection, nodeName string) {
SetNodeAffinityRequirement(nodeSelection, v1.NodeSelectorOpNotIn, nodeName)
}
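// Illustrative sketch, not part of this diff: forcing a second pod onto a
// different node than the one that ran the first pod.
func exampleAvoidNode(firstPodNodeName string) NodeSelection {
	node := NodeSelection{}
	SetAntiAffinity(&node, firstPodNodeName)
	return node
}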
// CreateClientPod defines and creates a pod with a mounted PV. Pod runs infinite loop until killed.
func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
return CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
}
@ -926,7 +974,7 @@ func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSe
return pod, nil
}
// wait until all pvcs phase set to bound
// WaitForPVClaimBoundPhase waits until all of the given PVCs reach the Bound phase
func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim, timeout time.Duration) ([]*v1.PersistentVolume, error) {
persistentvolumes := make([]*v1.PersistentVolume, len(pvclaims))
@ -949,6 +997,7 @@ func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist
return persistentvolumes, nil
}
// CreatePVSource creates a PV source.
func CreatePVSource(zone string) (*v1.PersistentVolumeSource, error) {
diskName, err := CreatePDWithRetryAndZone(zone)
if err != nil {
@ -957,6 +1006,50 @@ func CreatePVSource(zone string) (*v1.PersistentVolumeSource, error) {
return TestContext.CloudConfig.Provider.CreatePVSource(zone, diskName)
}
// DeletePVSource deletes a PV source.
func DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
return TestContext.CloudConfig.Provider.DeletePVSource(pvSource)
}
// GetBoundPV returns the PV bound to the given PVC.
func GetBoundPV(client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
// Get new copy of the claim
claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
// Get the bound PV
pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
return pv, err
}
// GetDefaultStorageClassName returns the name of the default StorageClass, or an error if none or more than one is found
func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
list, err := c.StorageV1().StorageClasses().List(metav1.ListOptions{})
if err != nil {
return "", fmt.Errorf("Error listing storage classes: %v", err)
}
var scName string
for _, sc := range list.Items {
if storageutil.IsDefaultAnnotation(sc.ObjectMeta) {
if len(scName) != 0 {
return "", fmt.Errorf("Multiple default storage classes found: %q and %q", scName, sc.Name)
}
scName = sc.Name
}
}
if len(scName) == 0 {
return "", fmt.Errorf("No default storage class found")
}
e2elog.Logf("Default storage class: %q", scName)
return scName, nil
}
// SkipIfNoDefaultStorageClass skips tests if no default SC can be found.
func SkipIfNoDefaultStorageClass(c clientset.Interface) {
_, err := GetDefaultStorageClassName(c)
if err != nil {
Skipf("error finding default storageClass : %v", err)
}
}
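// Illustrative sketch, not part of this diff: a test that needs dynamic
// provisioning typically guards itself like this before creating claims.
func exampleRequireDefaultSC(c clientset.Interface) {
	SkipIfNoDefaultStorageClass(c)
	scName, err := GetDefaultStorageClassName(c)
	ExpectNoError(err)
	e2elog.Logf("provisioning with default StorageClass %q", scName)
}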

View File

@ -21,16 +21,16 @@ import (
"strings"
"time"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
api "k8s.io/kubernetes/pkg/apis/core"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
)
@ -82,48 +82,12 @@ func RcByNameContainer(name string, replicas int32, image string, labels map[str
}
}
// ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till
// none are running, otherwise it does what a synchronous scale operation would do.
func ScaleRCByLabels(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns string, l map[string]string, replicas uint) error {
listOpts := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()}
rcs, err := clientset.CoreV1().ReplicationControllers(ns).List(listOpts)
if err != nil {
return err
}
if len(rcs.Items) == 0 {
return fmt.Errorf("RC with labels %v not found in ns %v", l, ns)
}
Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
for _, labelRC := range rcs.Items {
name := labelRC.Name
if err := ScaleRC(clientset, scalesGetter, ns, name, replicas, false); err != nil {
return err
}
rc, err := clientset.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
if err != nil {
return err
}
if replicas == 0 {
ps, err := testutils.NewPodStore(clientset, rc.Namespace, labels.SelectorFromSet(rc.Spec.Selector), fields.Everything())
if err != nil {
return err
}
defer ps.Stop()
if err = waitForPodsGone(ps, 10*time.Second, 10*time.Minute); err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
}
} else {
if err := testutils.WaitForPodsWithLabelRunning(
clientset, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector))); err != nil {
return err
}
}
}
return nil
}
type updateRcFunc func(d *v1.ReplicationController)
// UpdateReplicationControllerWithRetries retries updating the given rc on conflict with the following steps:
// 1. Get latest resource
// 2. applyUpdate
// 3. Update the resource
func UpdateReplicationControllerWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateRcFunc) (*v1.ReplicationController, error) {
var rc *v1.ReplicationController
var updateErr error
@ -135,7 +99,7 @@ func UpdateReplicationControllerWithRetries(c clientset.Interface, namespace, na
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(rc)
if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(rc); err == nil {
Logf("Updating replication controller %q", name)
e2elog.Logf("Updating replication controller %q", name)
return true, nil
}
updateErr = err
@ -152,12 +116,15 @@ func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error {
return DeleteResourceAndWaitForGC(c, api.Kind("ReplicationController"), ns, name)
}
// ScaleRC scales a ReplicationController to the desired size.
func ScaleRC(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, scalesGetter, ns, name, size, wait, api.Kind("ReplicationController"), api.Resource("replicationcontrollers"))
}
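// Illustrative sketch, not part of this diff: bumping an RC's replica count
// through the conflict-retrying updater above. The target replica count is a
// placeholder.
func exampleScaleUp(c clientset.Interface, ns, name string) error {
	_, err := UpdateReplicationControllerWithRetries(c, ns, name, func(rc *v1.ReplicationController) {
		replicas := int32(3)
		rc.Spec.Replicas = &replicas
	})
	return err
}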
// RunRC launches (and verifies the correctness of) a ReplicationController
// and waits for all the pods it spawns to become "Running".
func RunRC(config testutils.RCConfig) error {
By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
ginkgo.By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = DumpNodeDebugInfo
config.ContainerDumpFunc = LogFailedContainers
return testutils.RunRC(config)
@ -178,12 +145,11 @@ func WaitForReplicationController(c clientset.Interface, namespace, name string,
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
if err != nil {
Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
e2elog.Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
return !exist, nil
} else {
Logf("ReplicationController %s in namespace %s found.", name, namespace)
return exist, nil
}
e2elog.Logf("ReplicationController %s in namespace %s found.", name, namespace)
return exist, nil
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
@ -199,13 +165,13 @@ func WaitForReplicationControllerwithSelector(c clientset.Interface, namespace s
rcs, err := c.CoreV1().ReplicationControllers(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
switch {
case len(rcs.Items) != 0:
Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace)
e2elog.Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace)
return exist, nil
case len(rcs.Items) == 0:
Logf("ReplicationController with %s in namespace %s disappeared.", selector.String(), namespace)
e2elog.Logf("ReplicationController with %s in namespace %s disappeared.", selector.String(), namespace)
return !exist, nil
default:
Logf("List ReplicationController with %s in namespace %s failed: %v", selector.String(), namespace, err)
e2elog.Logf("List ReplicationController with %s in namespace %s failed: %v", selector.String(), namespace, err)
return false, nil
}
})
@ -249,38 +215,38 @@ func ValidateController(c clientset.Interface, containerImage string, replicas i
getImageTemplate := fmt.Sprintf(`--template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "%s"}}{{.image}}{{end}}{{end}}{{end}}`, containername)
By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
ginkgo.By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
waitLoop:
for start := time.Now(); time.Since(start) < PodStartTimeout; time.Sleep(5 * time.Second) {
getPodsOutput := RunKubectlOrDie("get", "pods", "-o", "template", getPodsTemplate, "-l", testname, fmt.Sprintf("--namespace=%v", ns))
pods := strings.Fields(getPodsOutput)
if numPods := len(pods); numPods != replicas {
By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods))
ginkgo.By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods))
continue
}
var runningPods []string
for _, podID := range pods {
running := RunKubectlOrDie("get", "pods", podID, "-o", "template", getContainerStateTemplate, fmt.Sprintf("--namespace=%v", ns))
if running != "true" {
Logf("%s is created but not running", podID)
e2elog.Logf("%s is created but not running", podID)
continue waitLoop
}
currentImage := RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, fmt.Sprintf("--namespace=%v", ns))
currentImage = trimDockerRegistry(currentImage)
if currentImage != containerImage {
Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
e2elog.Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
continue waitLoop
}
// Call the generic validator function here.
// This might validate for example, that (1) getting a url works and (2) url is serving correct content.
if err := validator(c, podID); err != nil {
Logf("%s is running right image but validator function failed: %v", podID, err)
e2elog.Logf("%s is running right image but validator function failed: %v", podID, err)
continue waitLoop
}
Logf("%s is verified up and running", podID)
e2elog.Logf("%s is verified up and running", podID)
runningPods = append(runningPods, podID)
}
// If we reach here, then all our checks passed.

View File

@ -32,24 +32,30 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/util/system"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)
// ResourceConstraint specifies the CPU and memory usage constraints for a container.
type ResourceConstraint struct {
CPUConstraint float64
MemoryConstraint uint64
}
// SingleContainerSummary holds the resource usage summary for a single container.
type SingleContainerSummary struct {
Name string
Cpu float64
CPU float64
Mem uint64
}
// ResourceUsageSummary maps a percentile (as a string) to the per-container usage summaries at that percentile.
// We can't use int keys here, as JSON does not accept integer keys.
type ResourceUsageSummary map[string][]SingleContainerSummary
// NoCPUConstraint means no CPU constraint is enforced.
const NoCPUConstraint = math.MaxFloat64
// PrintHumanReadable prints the resource usage summary in a human-readable format.
func (s *ResourceUsageSummary) PrintHumanReadable() string {
buf := &bytes.Buffer{}
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
@ -57,17 +63,19 @@ func (s *ResourceUsageSummary) PrintHumanReadable() string {
buf.WriteString(fmt.Sprintf("%v percentile:\n", perc))
fmt.Fprintf(w, "container\tcpu(cores)\tmemory(MB)\n")
for _, summary := range summaries {
fmt.Fprintf(w, "%q\t%.3f\t%.2f\n", summary.Name, summary.Cpu, float64(summary.Mem)/(1024*1024))
fmt.Fprintf(w, "%q\t%.3f\t%.2f\n", summary.Name, summary.CPU, float64(summary.Mem)/(1024*1024))
}
w.Flush()
}
return buf.String()
}
// PrintJSON prints resource usage summary in JSON.
func (s *ResourceUsageSummary) PrintJSON() string {
return PrettyPrintJSON(*s)
}
// SummaryKind returns the kind string of ResourceUsageSummary
func (s *ResourceUsageSummary) SummaryKind() string {
return "ResourceUsageSummary"
}
@ -159,13 +167,13 @@ func (w *resourceGatherWorker) singleProbe() {
} else {
nodeUsage, err := getOneTimeResourceUsageOnNode(w.c, w.nodeName, w.probeDuration, func() []string { return w.containerIDs })
if err != nil {
Logf("Error while reading data from %v: %v", w.nodeName, err)
e2elog.Logf("Error while reading data from %v: %v", w.nodeName, err)
return
}
for k, v := range nodeUsage {
data[k] = v
if w.printVerboseLogs {
Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
e2elog.Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
}
}
}
@ -175,7 +183,7 @@ func (w *resourceGatherWorker) singleProbe() {
func (w *resourceGatherWorker) gather(initialSleep time.Duration) {
defer utilruntime.HandleCrash()
defer w.wg.Done()
defer Logf("Closing worker for %v", w.nodeName)
defer e2elog.Logf("Closing worker for %v", w.nodeName)
defer func() { w.finished = true }()
select {
case <-time.After(initialSleep):
@ -193,6 +201,7 @@ func (w *resourceGatherWorker) gather(initialSleep time.Duration) {
}
}
// ContainerResourceGatherer gathers container resource usage from the nodes of a cluster.
type ContainerResourceGatherer struct {
client clientset.Interface
stopCh chan struct{}
@ -202,6 +211,7 @@ type ContainerResourceGatherer struct {
options ResourceGathererOptions
}
// ResourceGathererOptions holds the options for a ContainerResourceGatherer.
type ResourceGathererOptions struct {
InKubemark bool
Nodes NodesSet
@ -210,14 +220,19 @@ type ResourceGathererOptions struct {
PrintVerboseLogs bool
}
// NodesSet selects which set of nodes resource usage is gathered from.
type NodesSet int
const (
AllNodes NodesSet = 0 // All containers on all nodes
MasterNodes NodesSet = 1 // All containers on Master nodes only
MasterAndDNSNodes NodesSet = 2 // All containers on Master nodes and DNS containers on other nodes
// AllNodes means all containers on all nodes.
AllNodes NodesSet = 0
// MasterNodes means all containers on Master nodes only.
MasterNodes NodesSet = 1
// MasterAndDNSNodes means all containers on Master nodes and DNS containers on other nodes.
MasterAndDNSNodes NodesSet = 2
)
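// Illustrative sketch, not part of this diff: the intended gather workflow.
// Create a gatherer for the master nodes, start it in the background, run the
// measured workload, then stop and verify constraints. The constraint values
// are placeholders.
func exampleGatherUsage(c clientset.Interface) error {
	g, err := NewResourceUsageGatherer(c, ResourceGathererOptions{Nodes: MasterNodes}, nil)
	if err != nil {
		return err
	}
	go g.StartGatheringData()
	// ... run the workload being measured ...
	summary, err := g.StopAndSummarize([]int{50, 90, 99}, map[string]ResourceConstraint{
		"kube-apiserver": {CPUConstraint: 1.0, MemoryConstraint: 1 << 30},
	})
	if err != nil {
		return err
	}
	e2elog.Logf("%s", summary.PrintHumanReadable())
	return nil
}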
// NewResourceUsageGatherer returns a new ContainerResourceGatherer.
func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) {
g := ContainerResourceGatherer{
client: c,
@ -243,7 +258,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
if pods == nil {
pods, err = c.CoreV1().Pods("kube-system").List(metav1.ListOptions{})
if err != nil {
Logf("Error while listing Pods: %v", err)
e2elog.Logf("Error while listing Pods: %v", err)
return nil, err
}
}
@ -267,7 +282,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
}
nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
Logf("Error while listing Nodes: %v", err)
e2elog.Logf("Error while listing Nodes: %v", err)
return nil, err
}
@ -316,7 +331,7 @@ func (g *ContainerResourceGatherer) StartGatheringData() {
// specified resource constraints.
func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constraints map[string]ResourceConstraint) (*ResourceUsageSummary, error) {
close(g.stopCh)
Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
e2elog.Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
finished := make(chan struct{})
go func() {
g.workerWg.Wait()
@ -324,7 +339,7 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai
}()
select {
case <-finished:
Logf("Waitgroup finished.")
e2elog.Logf("Waitgroup finished.")
case <-time.After(2 * time.Minute):
unfinished := make([]string, 0)
for i := range g.workers {
@ -332,11 +347,11 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai
unfinished = append(unfinished, g.workers[i].nodeName)
}
}
Logf("Timed out while waiting for waitgroup, some workers failed to finish: %v", unfinished)
e2elog.Logf("Timed out while waiting for waitgroup, some workers failed to finish: %v", unfinished)
}
if len(percentiles) == 0 {
Logf("Warning! Empty percentile list for stopAndPrintData.")
e2elog.Logf("Warning! Empty percentile list for stopAndPrintData.")
return &ResourceUsageSummary{}, fmt.Errorf("failed to get any resource usage data")
}
data := make(map[int]ResourceUsagePerContainer)
@ -360,7 +375,7 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai
usage := data[perc][name]
summary[strconv.Itoa(perc)] = append(summary[strconv.Itoa(perc)], SingleContainerSummary{
Name: name,
Cpu: usage.CPUUsageInCores,
CPU: usage.CPUUsageInCores,
Mem: usage.MemoryWorkingSetInBytes,
})
// Verifying 99th percentile of resource usage

View File

@ -1,159 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
. "github.com/onsi/ginkgo"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
testutils "k8s.io/kubernetes/test/utils"
)
type updateRsFunc func(d *apps.ReplicaSet)
func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) {
return testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)
}
// CheckNewRSAnnotations check if the new RS's annotation is as expected
func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return err
}
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
if err != nil {
return err
}
for k, v := range expectedAnnotations {
// Skip checking revision annotations
if k != deploymentutil.RevisionAnnotation && v != newRS.Annotations[k] {
return fmt.Errorf("Expected new RS annotations = %+v, got %+v", expectedAnnotations, newRS.Annotations)
}
}
return nil
}
// WaitForReadyReplicaSet waits until the replicaset has all of its replicas ready.
func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
err := wait.Poll(Poll, pollShortTimeout, func() (bool, error) {
rs, err := c.AppsV1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
return *(rs.Spec.Replicas) == rs.Status.Replicas && *(rs.Spec.Replicas) == rs.Status.ReadyReplicas, nil
})
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("replicaset %q never became ready", name)
}
return err
}
// WaitForReplicaSetDesiredReplicas waits until the replicaset has desired number of replicas.
func WaitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, replicaSet *apps.ReplicaSet) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.Replicas == *(replicaSet.Spec.Replicas) && rs.Status.Replicas == *(rs.Spec.Replicas), nil
})
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("replicaset %q never had desired number of replicas", replicaSet.Name)
}
return err
}
// WaitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a RS to equal targetReplicaNum
func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *apps.ReplicaSet, targetReplicaNum int32) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return rs.Status.ObservedGeneration >= desiredGeneration && *rs.Spec.Replicas == targetReplicaNum, nil
})
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("replicaset %q never had desired number of .spec.replicas", replicaSet.Name)
}
return err
}
// WaitForReplicaSetTargetAvailableReplicas waits for .status.availableReplicas of a RS to equal targetReplicaNum
func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet *apps.ReplicaSet, targetReplicaNum int32) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.AvailableReplicas == targetReplicaNum, nil
})
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("replicaset %q never had desired number of .status.availableReplicas", replicaSet.Name)
}
return err
}
func RunReplicaSet(config testutils.ReplicaSetConfig) error {
By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = DumpNodeDebugInfo
config.ContainerDumpFunc = LogFailedContainers
return testutils.RunReplicaSet(config)
}
func NewReplicaSet(name, namespace string, replicas int32, podLabels map[string]string, imageName, image string) *apps.ReplicaSet {
return &apps.ReplicaSet{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicaSet",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: podLabels,
},
Replicas: &replicas,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: imageName,
Image: image,
},
},
},
},
},
}
}

View File

@ -30,7 +30,6 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/sets"
@ -39,12 +38,13 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/retry"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/registry/core/service/portallocator"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
const (
@ -66,21 +66,26 @@ const (
// on AWS. A few minutes is typical, so use 10m.
LoadBalancerLagTimeoutAWS = 10 * time.Minute
// How long to wait for a load balancer to be created/modified.
//TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable
// LoadBalancerCreateTimeoutDefault is the default time to wait for a load balancer to be created/modified.
// TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable
LoadBalancerCreateTimeoutDefault = 20 * time.Minute
LoadBalancerCreateTimeoutLarge = 2 * time.Hour
// LoadBalancerCreateTimeoutLarge is the maximum time to wait for a load balancer to be created/modified.
LoadBalancerCreateTimeoutLarge = 2 * time.Hour
// Time required by the loadbalancer to cleanup, proportional to numApps/Ing.
// LoadBalancerCleanupTimeout is the time required by the loadbalancer to cleanup, proportional to numApps/Ing.
// Bring the cleanup timeout back down to 5m once b/33588344 is resolved.
LoadBalancerCleanupTimeout = 15 * time.Minute
// LoadBalancerPollTimeout is how long to keep polling the load balancer before giving up.
// On average it takes ~6 minutes for a single backend to come online in GCE.
LoadBalancerPollTimeout = 15 * time.Minute
LoadBalancerPollTimeout = 15 * time.Minute
// LoadBalancerPollInterval is the interval between load balancer polls.
LoadBalancerPollInterval = 30 * time.Second
// LargeClusterMinNodesNumber is the minimum number of nodes for a cluster to be considered large.
LargeClusterMinNodesNumber = 100
// MaxNodesForEndpointsTests is the maximum number of nodes to use when testing endpoints.
// Don't test with more than 3 nodes.
// Many tests create an endpoint per node; in large clusters this is
// resource and time intensive.
@ -98,10 +103,10 @@ const (
AffinityConfirmCount = 15
)
// This should match whatever the default/configured range is
// ServiceNodePortRange should match whatever the default/configured range is
var ServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768}
// A test jig to help service testing.
// ServiceTestJig is a test jig to help service testing.
type ServiceTestJig struct {
ID string
Name string
@ -235,6 +240,7 @@ func (j *ServiceTestJig) CreateServiceWithServicePort(labels map[string]string,
return j.Client.CoreV1().Services(namespace).Create(service)
}
// ChangeServiceType updates the given service's ServiceType to the given newType.
func (j *ServiceTestJig) ChangeServiceType(namespace, name string, newType v1.ServiceType, timeout time.Duration) {
ingressIP := ""
svc := j.UpdateServiceOrFail(namespace, name, func(s *v1.Service) {
@ -256,7 +262,7 @@ func (j *ServiceTestJig) ChangeServiceType(namespace, name string, newType v1.Se
// If createPod is true, it also creates an RC with 1 replica of
// the standard netexec container used everywhere in this test.
func (j *ServiceTestJig) CreateOnlyLocalNodePortService(namespace, serviceName string, createPod bool) *v1.Service {
By("creating a service " + namespace + "/" + serviceName + " with type=NodePort and ExternalTrafficPolicy=Local")
ginkgo.By("creating a service " + namespace + "/" + serviceName + " with type=NodePort and ExternalTrafficPolicy=Local")
svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeNodePort
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
@ -264,7 +270,7 @@ func (j *ServiceTestJig) CreateOnlyLocalNodePortService(namespace, serviceName s
})
if createPod {
By("creating a pod to be part of the service " + serviceName)
ginkgo.By("creating a pod to be part of the service " + serviceName)
j.RunOrFail(namespace, nil)
}
j.SanityCheckService(svc, v1.ServiceTypeNodePort)
@ -277,7 +283,7 @@ func (j *ServiceTestJig) CreateOnlyLocalNodePortService(namespace, serviceName s
// the standard netexec container used everywhere in this test.
func (j *ServiceTestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceName string, timeout time.Duration, createPod bool,
tweak func(svc *v1.Service)) *v1.Service {
By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer and ExternalTrafficPolicy=Local")
ginkgo.By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer and ExternalTrafficPolicy=Local")
svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer
// We need to turn affinity off for our LB distribution tests
@ -289,10 +295,10 @@ func (j *ServiceTestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceNa
})
if createPod {
By("creating a pod to be part of the service " + serviceName)
ginkgo.By("creating a pod to be part of the service " + serviceName)
j.RunOrFail(namespace, nil)
}
By("waiting for loadbalancer for service " + namespace + "/" + serviceName)
ginkgo.By("waiting for loadbalancer for service " + namespace + "/" + serviceName)
svc = j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout)
j.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
return svc
@ -301,7 +307,7 @@ func (j *ServiceTestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceNa
// CreateLoadBalancerService creates a loadbalancer service and waits
// for it to acquire an ingress IP.
func (j *ServiceTestJig) CreateLoadBalancerService(namespace, serviceName string, timeout time.Duration, tweak func(svc *v1.Service)) *v1.Service {
By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer")
ginkgo.By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer")
svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer
// We need to turn affinity off for our LB distribution tests
@ -311,22 +317,24 @@ func (j *ServiceTestJig) CreateLoadBalancerService(namespace, serviceName string
}
})
By("waiting for loadbalancer for service " + namespace + "/" + serviceName)
ginkgo.By("waiting for loadbalancer for service " + namespace + "/" + serviceName)
svc = j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout)
j.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
return svc
}
// GetNodeAddresses returns a list of addresses of the given addressType for the given node
func GetNodeAddresses(node *v1.Node, addressType v1.NodeAddressType) (ips []string) {
for j := range node.Status.Addresses {
nodeAddress := &node.Status.Addresses[j]
if nodeAddress.Type == addressType {
if nodeAddress.Type == addressType && nodeAddress.Address != "" {
ips = append(ips, nodeAddress.Address)
}
}
return
}
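// Illustrative sketch, not part of this diff: gathering the internal IPs of
// all ready, schedulable nodes with the helpers in this file.
func exampleInternalIPs(c clientset.Interface) []string {
	nodes := GetReadySchedulableNodesOrDie(c)
	return CollectAddresses(nodes, v1.NodeInternalIP)
}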
// CollectAddresses returns a list of addresses of the given addressType for the given list of nodes
func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []string {
ips := []string{}
for i := range nodes.Items {
@ -335,6 +343,7 @@ func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []stri
return ips
}
// GetNodePublicIps returns a public IP list of nodes.
func GetNodePublicIps(c clientset.Interface) ([]string, error) {
nodes := GetReadySchedulableNodesOrDie(c)
@ -346,6 +355,7 @@ func GetNodePublicIps(c clientset.Interface) ([]string, error) {
return ips, nil
}
// PickNodeIP picks one public node IP
func PickNodeIP(c clientset.Interface) string {
publicIps, err := GetNodePublicIps(c)
ExpectNoError(err)
@ -403,7 +413,7 @@ func (j *ServiceTestJig) GetEndpointNodes(svc *v1.Service) map[string][]string {
return nodeMap
}
// getNodes returns the first maxNodesForTest nodes. Useful in large clusters
// GetNodes returns the first maxNodesForTest nodes. Useful in large clusters
// where we don't eg: want to create an endpoint per node.
func (j *ServiceTestJig) GetNodes(maxNodesForTest int) (nodes *v1.NodeList) {
nodes = GetReadySchedulableNodesOrDie(j.Client)
@ -414,6 +424,7 @@ func (j *ServiceTestJig) GetNodes(maxNodesForTest int) (nodes *v1.NodeList) {
return nodes
}
// GetNodesNames returns a list of names of the first maxNodesForTest nodes
func (j *ServiceTestJig) GetNodesNames(maxNodesForTest int) []string {
nodes := j.GetNodes(maxNodesForTest)
nodesNames := []string{}
@ -423,26 +434,27 @@ func (j *ServiceTestJig) GetNodesNames(maxNodesForTest int) []string {
return nodesNames
}
// WaitForEndpointOnNode waits for a service endpoint on the given node.
func (j *ServiceTestJig) WaitForEndpointOnNode(namespace, serviceName, nodeName string) {
err := wait.PollImmediate(Poll, LoadBalancerCreateTimeoutDefault, func() (bool, error) {
endpoints, err := j.Client.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil {
Logf("Get endpoints for service %s/%s failed (%s)", namespace, serviceName, err)
e2elog.Logf("Get endpoints for service %s/%s failed (%s)", namespace, serviceName, err)
return false, nil
}
if len(endpoints.Subsets) == 0 {
Logf("Expect endpoints with subsets, got none.")
e2elog.Logf("Expect endpoints with subsets, got none.")
return false, nil
}
// TODO: Handle multiple endpoints
if len(endpoints.Subsets[0].Addresses) == 0 {
Logf("Expected Ready endpoints - found none")
e2elog.Logf("Expected Ready endpoints - found none")
return false, nil
}
epHostName := *endpoints.Subsets[0].Addresses[0].NodeName
Logf("Pod for service %s/%s is on node %s", namespace, serviceName, epHostName)
e2elog.Logf("Pod for service %s/%s is on node %s", namespace, serviceName, epHostName)
if epHostName != nodeName {
Logf("Found endpoint on wrong node, expected %v, got %v", nodeName, epHostName)
e2elog.Logf("Found endpoint on wrong node, expected %v, got %v", nodeName, epHostName)
return false, nil
}
return true, nil
@ -450,6 +462,7 @@ func (j *ServiceTestJig) WaitForEndpointOnNode(namespace, serviceName, nodeName
ExpectNoError(err)
}
// SanityCheckService performs sanity checks on the given service
func (j *ServiceTestJig) SanityCheckService(svc *v1.Service, svcType v1.ServiceType) {
if svc.Spec.Type != svcType {
Failf("unexpected Spec.Type (%s) for service, expected %s", svc.Spec.Type, svcType)
@ -507,7 +520,7 @@ func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*v1.S
for i := 0; i < 3; i++ {
service, err := j.Client.CoreV1().Services(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("Failed to get Service %q: %v", name, err)
return nil, fmt.Errorf("failed to get Service %q: %v", name, err)
}
update(service)
service, err = j.Client.CoreV1().Services(namespace).Update(service)
@ -515,10 +528,10 @@ func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*v1.S
return service, nil
}
if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
return nil, fmt.Errorf("Failed to update Service %q: %v", name, err)
return nil, fmt.Errorf("failed to update Service %q: %v", name, err)
}
}
return nil, fmt.Errorf("Too many retries updating Service %q", name)
return nil, fmt.Errorf("too many retries updating Service %q", name)
}
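For illustration, a minimal caller sketch of this retry helper (the jig variable, namespace, and port value are hypothetical; imports elided):
// Hypothetical sketch: mutate a service through the conflict-retrying helper.
svc, err := jig.UpdateService("default", "my-svc", func(s *v1.Service) {
	s.Spec.Ports[0].Port = 8080 // re-applied on Conflict/ServerTimeout, up to 3 attempts
})
if err != nil {
	Failf("update failed: %v", err)
}
_ = svc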
// UpdateServiceOrFail fetches a service, calls the update function on it, and
@ -532,8 +545,9 @@ func (j *ServiceTestJig) UpdateServiceOrFail(namespace, name string, update func
return svc
}
// WaitForNewIngressIPOrFail waits for the given service to get a new ingress IP, or fails after the given timeout
func (j *ServiceTestJig) WaitForNewIngressIPOrFail(namespace, name, existingIP string, timeout time.Duration) *v1.Service {
Logf("Waiting up to %v for service %q to get a new ingress IP", timeout, name)
e2elog.Logf("Waiting up to %v for service %q to get a new ingress IP", timeout, name)
service := j.waitForConditionOrFail(namespace, name, timeout, "have a new ingress IP", func(svc *v1.Service) bool {
if len(svc.Status.LoadBalancer.Ingress) == 0 {
return false
@ -547,6 +561,7 @@ func (j *ServiceTestJig) WaitForNewIngressIPOrFail(namespace, name, existingIP s
return service
}
// ChangeServiceNodePortOrFail changes node ports of the given service.
func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, initial int) *v1.Service {
var err error
var service *v1.Service
@ -558,7 +573,7 @@ func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, ini
s.Spec.Ports[0].NodePort = int32(newPort)
})
if err != nil && strings.Contains(err.Error(), portallocator.ErrAllocated.Error()) {
Logf("tried nodePort %d, but it is in use, will try another", newPort)
e2elog.Logf("tried nodePort %d, but it is in use, will try another", newPort)
continue
}
// Otherwise err was nil or err was a real error
@ -570,31 +585,27 @@ func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, ini
return service
}
// WaitForLoadBalancerOrFail waits for the given service to have a LoadBalancer, or fails after the given timeout
func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string, timeout time.Duration) *v1.Service {
Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, name)
e2elog.Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, name)
service := j.waitForConditionOrFail(namespace, name, timeout, "have a load balancer", func(svc *v1.Service) bool {
if len(svc.Status.LoadBalancer.Ingress) > 0 {
return true
}
return false
return len(svc.Status.LoadBalancer.Ingress) > 0
})
return service
}
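A short usage sketch (names are illustrative; GetServiceLoadBalancerCreationTimeout and GetIngressPoint appear later in this file):
// Hypothetical sketch: wait for an LB, then probe its ingress point.
timeout := GetServiceLoadBalancerCreationTimeout(jig.Client)
svc := jig.WaitForLoadBalancerOrFail(ns, "my-lb-svc", timeout)
ingress := GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
jig.TestReachableHTTP(ingress, 80, timeout)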
// WaitForLoadBalancerDestroyOrFail waits for the given service's LoadBalancer to be destroyed, or fails after the given timeout
func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string, ip string, port int, timeout time.Duration) *v1.Service {
// TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable
defer func() {
if err := EnsureLoadBalancerResourcesDeleted(ip, strconv.Itoa(port)); err != nil {
Logf("Failed to delete cloud resources for service: %s %d (%v)", ip, port, err)
e2elog.Logf("Failed to delete cloud resources for service: %s %d (%v)", ip, port, err)
}
}()
Logf("Waiting up to %v for service %q to have no LoadBalancer", timeout, name)
e2elog.Logf("Waiting up to %v for service %q to have no LoadBalancer", timeout, name)
service := j.waitForConditionOrFail(namespace, name, timeout, "have no load balancer", func(svc *v1.Service) bool {
if len(svc.Status.LoadBalancer.Ingress) == 0 {
return true
}
return false
return len(svc.Status.LoadBalancer.Ingress) == 0
})
return service
}
@ -663,6 +674,7 @@ func (j *ServiceTestJig) newRCTemplate(namespace string) *v1.ReplicationControll
return rc
}
// AddRCAntiAffinity adds AntiAffinity to the given ReplicationController.
func (j *ServiceTestJig) AddRCAntiAffinity(rc *v1.ReplicationController) {
var replicas int32 = 2
@ -682,6 +694,7 @@ func (j *ServiceTestJig) AddRCAntiAffinity(rc *v1.ReplicationController) {
})
}
// CreatePDBOrFail returns a PodDisruptionBudget for the given ReplicationController, or fails if a PodDisruptionBudget isn't ready
func (j *ServiceTestJig) CreatePDBOrFail(namespace string, rc *v1.ReplicationController) *policyv1beta1.PodDisruptionBudget {
pdb := j.newPDBTemplate(namespace, rc)
newPdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(pdb)
@ -738,6 +751,7 @@ func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *v1.Replicati
return result
}
// Scale scales the ReplicationController to the given number of replicas
func (j *ServiceTestJig) Scale(namespace string, replicas int) {
rc := j.Name
scale, err := j.Client.CoreV1().ReplicationControllers(namespace).GetScale(rc, metav1.GetOptions{})
@ -757,7 +771,6 @@ func (j *ServiceTestJig) Scale(namespace string, replicas int) {
if err := j.waitForPodsReady(namespace, pods); err != nil {
Failf("Failed waiting for pods to be running: %v", err)
}
return
}
func (j *ServiceTestJig) waitForPdbReady(namespace string) error {
@ -772,14 +785,14 @@ func (j *ServiceTestJig) waitForPdbReady(namespace string) error {
}
}
return fmt.Errorf("Timeout waiting for PDB %q to be ready", j.Name)
return fmt.Errorf("timeout waiting for PDB %q to be ready", j.Name)
}
func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]string, error) {
timeout := 2 * time.Minute
// List the pods, making sure we observe all the replicas.
label := labels.SelectorFromSet(labels.Set(j.Labels))
Logf("Waiting up to %v for %d pods to be created", timeout, replicas)
e2elog.Logf("Waiting up to %v for %d pods to be created", timeout, replicas)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := j.Client.CoreV1().Pods(namespace).List(options)
@ -795,18 +808,18 @@ func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]s
found = append(found, pod.Name)
}
if len(found) == replicas {
Logf("Found all %d pods", replicas)
e2elog.Logf("Found all %d pods", replicas)
return found, nil
}
Logf("Found %d/%d pods - will retry", len(found), replicas)
e2elog.Logf("Found %d/%d pods - will retry", len(found), replicas)
}
return nil, fmt.Errorf("Timeout waiting for %d pods to be created", replicas)
return nil, fmt.Errorf("timeout waiting for %d pods to be created", replicas)
}
func (j *ServiceTestJig) waitForPodsReady(namespace string, pods []string) error {
timeout := 2 * time.Minute
if !CheckPodsRunningReady(j.Client, namespace, pods, timeout) {
return fmt.Errorf("Timeout waiting for %d pods to be ready", len(pods))
return fmt.Errorf("timeout waiting for %d pods to be ready", len(pods))
}
return nil
}
@ -821,7 +834,7 @@ func newNetexecPodSpec(podName string, httpPort, udpPort int32, hostNetwork bool
Containers: []v1.Container{
{
Name: "netexec",
Image: NetexecImageName,
Image: netexecImageName,
Command: []string{
"/netexec",
fmt.Sprintf("--http-port=%d", httpPort),
@ -845,8 +858,9 @@ func newNetexecPodSpec(podName string, httpPort, udpPort int32, hostNetwork bool
return pod
}
// LaunchNetexecPodOnNode launches a netexec pod on the given node.
func (j *ServiceTestJig) LaunchNetexecPodOnNode(f *Framework, nodeName, podName string, httpPort, udpPort int32, hostNetwork bool) {
Logf("Creating netexec pod %q on node %v in namespace %q", podName, nodeName, f.Namespace.Name)
e2elog.Logf("Creating netexec pod %q on node %v in namespace %q", podName, nodeName, f.Namespace.Name)
pod := newNetexecPodSpec(podName, httpPort, udpPort, hostNetwork)
pod.Spec.NodeName = nodeName
pod.ObjectMeta.Labels = j.Labels
@ -854,7 +868,7 @@ func (j *ServiceTestJig) LaunchNetexecPodOnNode(f *Framework, nodeName, podName
_, err := podClient.Create(pod)
ExpectNoError(err)
ExpectNoError(f.WaitForPodRunning(podName))
Logf("Netexec pod %q in namespace %q running", pod.Name, f.Namespace.Name)
e2elog.Logf("Netexec pod %q in namespace %q running", pod.Name, f.Namespace.Name)
}
// newEchoServerPodSpec returns the pod spec of echo server pod
@ -882,7 +896,7 @@ func newEchoServerPodSpec(podName string) *v1.Pod {
// as the target for the source IP preservation test. The client's source IP would
// be echoed back by the web server.
func (j *ServiceTestJig) LaunchEchoserverPodOnNode(f *Framework, nodeName, podName string) {
Logf("Creating echo server pod %q in namespace %q", podName, f.Namespace.Name)
e2elog.Logf("Creating echo server pod %q in namespace %q", podName, f.Namespace.Name)
pod := newEchoServerPodSpec(podName)
pod.Spec.NodeName = nodeName
pod.ObjectMeta.Labels = j.Labels
@ -890,13 +904,15 @@ func (j *ServiceTestJig) LaunchEchoserverPodOnNode(f *Framework, nodeName, podNa
_, err := podClient.Create(pod)
ExpectNoError(err)
ExpectNoError(f.WaitForPodRunning(podName))
Logf("Echo server pod %q in namespace %q running", pod.Name, f.Namespace.Name)
e2elog.Logf("Echo server pod %q in namespace %q running", pod.Name, f.Namespace.Name)
}
// TestReachableHTTP tests that the given host serves HTTP on the given port.
func (j *ServiceTestJig) TestReachableHTTP(host string, port int, timeout time.Duration) {
j.TestReachableHTTPWithRetriableErrorCodes(host, port, []int{}, timeout)
}
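For example (a sketch with placeholder host and port values), transient gateway errors from a warming-up load balancer can be tolerated like so:
// Hypothetical sketch: treat 502/503 as retriable while the LB converges.
jig.TestReachableHTTPWithRetriableErrorCodes("203.0.113.10", 80, []int{502, 503}, 5*time.Minute)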
// TestReachableHTTPWithRetriableErrorCodes tests that the given host serves HTTP on the given port with the given retriableErrCodes.
func (j *ServiceTestJig) TestReachableHTTPWithRetriableErrorCodes(host string, port int, retriableErrCodes []int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := PokeHTTP(host, port, "/echo?msg=hello",
@ -919,6 +935,7 @@ func (j *ServiceTestJig) TestReachableHTTPWithRetriableErrorCodes(host string, p
}
}
// TestNotReachableHTTP tests that an HTTP request doesn't connect to the given host and port.
func (j *ServiceTestJig) TestNotReachableHTTP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := PokeHTTP(host, port, "/", nil)
@ -933,6 +950,7 @@ func (j *ServiceTestJig) TestNotReachableHTTP(host string, port int, timeout tim
}
}
// TestRejectedHTTP tests that the given host rejects an HTTP request on the given port.
func (j *ServiceTestJig) TestRejectedHTTP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := PokeHTTP(host, port, "/", nil)
@ -947,6 +965,7 @@ func (j *ServiceTestJig) TestRejectedHTTP(host string, port int, timeout time.Du
}
}
// TestReachableUDP tests that the given host serves UDP on the given port.
func (j *ServiceTestJig) TestReachableUDP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := PokeUDP(host, port, "echo hello", &UDPPokeParams{
@ -964,6 +983,7 @@ func (j *ServiceTestJig) TestReachableUDP(host string, port int, timeout time.Du
}
}
// TestNotReachableUDP tests that the given host doesn't serve UDP on the given port.
func (j *ServiceTestJig) TestNotReachableUDP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := PokeUDP(host, port, "echo hello", &UDPPokeParams{Timeout: 3 * time.Second})
@ -977,6 +997,7 @@ func (j *ServiceTestJig) TestNotReachableUDP(host string, port int, timeout time
}
}
// TestRejectedUDP tests that the given host rejects a UDP request on the given port.
func (j *ServiceTestJig) TestRejectedUDP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := PokeUDP(host, port, "echo hello", &UDPPokeParams{Timeout: 3 * time.Second})
@ -990,6 +1011,7 @@ func (j *ServiceTestJig) TestRejectedUDP(host string, port int, timeout time.Dur
}
}
// GetHTTPContent returns the content of the given URL by HTTP.
func (j *ServiceTestJig) GetHTTPContent(host string, port int, timeout time.Duration, url string) bytes.Buffer {
var body bytes.Buffer
if pollErr := wait.PollImmediate(Poll, timeout, func() (bool, error) {
@ -1010,17 +1032,17 @@ func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, err
url := fmt.Sprintf("http://%s%s", ipPort, request)
if ip == "" || port == 0 {
Failf("Got empty IP for reachability check (%s)", url)
return false, fmt.Errorf("Invalid input ip or port")
return false, fmt.Errorf("invalid input ip or port")
}
Logf("Testing HTTP health check on %v", url)
e2elog.Logf("Testing HTTP health check on %v", url)
resp, err := httpGetNoConnectionPoolTimeout(url, 5*time.Second)
if err != nil {
Logf("Got error testing for reachability of %s: %v", url, err)
e2elog.Logf("Got error testing for reachability of %s: %v", url, err)
return false, err
}
defer resp.Body.Close()
if err != nil {
Logf("Got error reading response from %s: %v", url, err)
e2elog.Logf("Got error reading response from %s: %v", url, err)
return false, err
}
// HealthCheck responder returns 503 for no local endpoints
@ -1031,9 +1053,10 @@ func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, err
if resp.StatusCode == 200 {
return true, nil
}
return false, fmt.Errorf("Unexpected HTTP response code %s from health check responder at %s", resp.Status, url)
return false, fmt.Errorf("unexpected HTTP response code %s from health check responder at %s", resp.Status, url)
}
// TestHTTPHealthCheckNodePort tests an HTTP connection via the given request to the given host and port.
func (j *ServiceTestJig) TestHTTPHealthCheckNodePort(host string, port int, request string, timeout time.Duration, expectSucceed bool, threshold int) error {
count := 0
condition := func() (bool, error) {
@ -1054,13 +1077,13 @@ func (j *ServiceTestJig) TestHTTPHealthCheckNodePort(host string, port int, requ
return nil
}
// Simple helper class to avoid too much boilerplate in tests
// ServiceTestFixture is a simple helper class to avoid too much boilerplate in tests
type ServiceTestFixture struct {
ServiceName string
Namespace string
Client clientset.Interface
TestId string
TestID string
Labels map[string]string
rcs map[string]bool
@ -1069,14 +1092,15 @@ type ServiceTestFixture struct {
Image string
}
// NewServerTest creates a new ServiceTestFixture for the tests.
func NewServerTest(client clientset.Interface, namespace string, serviceName string) *ServiceTestFixture {
t := &ServiceTestFixture{}
t.Client = client
t.Namespace = namespace
t.ServiceName = serviceName
t.TestId = t.ServiceName + "-" + string(uuid.NewUUID())
t.TestID = t.ServiceName + "-" + string(uuid.NewUUID())
t.Labels = map[string]string{
"testid": t.TestId,
"testid": t.TestID,
}
t.rcs = make(map[string]bool)
@ -1088,7 +1112,7 @@ func NewServerTest(client clientset.Interface, namespace string, serviceName str
return t
}
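A minimal lifecycle sketch for the fixture (assuming the test's *Framework is f; imports elided):
// Hypothetical sketch of the ServiceTestFixture lifecycle.
t := NewServerTest(f.ClientSet, f.Namespace.Name, "test-svc")
defer func() {
	for _, e := range t.Cleanup() {
		e2elog.Logf("cleanup error: %v", e)
	}
}()
svc, err := t.CreateService(t.BuildServiceSpec())
ExpectNoError(err)
_ = svc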
// Build default config for a service (which can then be changed)
// BuildServiceSpec builds default config for a service (which can then be changed)
func (t *ServiceTestFixture) BuildServiceSpec() *v1.Service {
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
@ -1106,20 +1130,6 @@ func (t *ServiceTestFixture) BuildServiceSpec() *v1.Service {
return service
}
// CreateWebserverRC creates rc-backed pods with the well-known webserver
// configuration and records it for cleanup.
func (t *ServiceTestFixture) CreateWebserverRC(replicas int32) *v1.ReplicationController {
rcSpec := RcByNamePort(t.Name, replicas, t.Image, 80, v1.ProtocolTCP, t.Labels, nil)
rcAct, err := t.CreateRC(rcSpec)
if err != nil {
Failf("Failed to create rc %s: %v", rcSpec.Name, err)
}
if err := VerifyPods(t.Client, t.Namespace, t.Name, false, replicas); err != nil {
Failf("Failed to create %d pods with name %s: %v", replicas, t.Name, err)
}
return rcAct
}
// CreateRC creates a replication controller and records it for cleanup.
func (t *ServiceTestFixture) CreateRC(rc *v1.ReplicationController) (*v1.ReplicationController, error) {
rc, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Create(rc)
@ -1129,7 +1139,7 @@ func (t *ServiceTestFixture) CreateRC(rc *v1.ReplicationController) (*v1.Replica
return rc, err
}
// Create a service, and record it for cleanup
// CreateService creates a service and records it for cleanup
func (t *ServiceTestFixture) CreateService(service *v1.Service) (*v1.Service, error) {
result, err := t.Client.CoreV1().Services(t.Namespace).Create(service)
if err == nil {
@ -1138,7 +1148,7 @@ func (t *ServiceTestFixture) CreateService(service *v1.Service) (*v1.Service, er
return result, err
}
// Delete a service, and remove it from the cleanup list
// DeleteService deletes a service and removes it from the cleanup list
func (t *ServiceTestFixture) DeleteService(serviceName string) error {
err := t.Client.CoreV1().Services(t.Namespace).Delete(serviceName, nil)
if err == nil {
@ -1147,10 +1157,11 @@ func (t *ServiceTestFixture) DeleteService(serviceName string) error {
return err
}
// Cleanup cleans all ReplicationControllers and Services which this object holds.
func (t *ServiceTestFixture) Cleanup() []error {
var errs []error
for rcName := range t.rcs {
By("stopping RC " + rcName + " in namespace " + t.Namespace)
ginkgo.By("stopping RC " + rcName + " in namespace " + t.Namespace)
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
// First, resize the RC to 0.
old, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Get(rcName, metav1.GetOptions{})
@ -1183,7 +1194,7 @@ func (t *ServiceTestFixture) Cleanup() []error {
}
for serviceName := range t.services {
By("deleting service " + serviceName + " in namespace " + t.Namespace)
ginkgo.By("deleting service " + serviceName + " in namespace " + t.Namespace)
err := t.Client.CoreV1().Services(t.Namespace).Delete(serviceName, nil)
if err != nil {
if !errors.IsNotFound(err) {
@ -1195,6 +1206,7 @@ func (t *ServiceTestFixture) Cleanup() []error {
return errs
}
// GetIngressPoint returns a host on which ingress serves.
func GetIngressPoint(ing *v1.LoadBalancerIngress) string {
host := ing.IP
if host == "" {
@ -1226,105 +1238,12 @@ func UpdateService(c clientset.Interface, namespace, serviceName string, update
return service, err
}
func GetContainerPortsByPodUID(endpoints *v1.Endpoints) PortsByPodUID {
m := PortsByPodUID{}
for _, ss := range endpoints.Subsets {
for _, port := range ss.Ports {
for _, addr := range ss.Addresses {
containerPort := port.Port
if _, ok := m[addr.TargetRef.UID]; !ok {
m[addr.TargetRef.UID] = make([]int, 0)
}
m[addr.TargetRef.UID] = append(m[addr.TargetRef.UID], int(containerPort))
}
}
}
return m
}
type PortsByPodName map[string][]int
type PortsByPodUID map[types.UID][]int
func translatePodNameToUIDOrFail(c clientset.Interface, ns string, expectedEndpoints PortsByPodName) PortsByPodUID {
portsByUID := make(PortsByPodUID)
for name, portList := range expectedEndpoints {
pod, err := c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
if err != nil {
Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err)
}
portsByUID[pod.ObjectMeta.UID] = portList
}
// Logf("successfully translated pod names to UIDs: %v -> %v on namespace %s", expectedEndpoints, portsByUID, ns)
return portsByUID
}
func validatePortsOrFail(endpoints PortsByPodUID, expectedEndpoints PortsByPodUID) {
if len(endpoints) != len(expectedEndpoints) {
// should not happen because we check this condition before
Failf("invalid number of endpoints got %v, expected %v", endpoints, expectedEndpoints)
}
for podUID := range expectedEndpoints {
if _, ok := endpoints[podUID]; !ok {
Failf("endpoint %v not found", podUID)
}
if len(endpoints[podUID]) != len(expectedEndpoints[podUID]) {
Failf("invalid list of ports for uid %v. Got %v, expected %v", podUID, endpoints[podUID], expectedEndpoints[podUID])
}
sort.Ints(endpoints[podUID])
sort.Ints(expectedEndpoints[podUID])
for index := range endpoints[podUID] {
if endpoints[podUID][index] != expectedEndpoints[podUID][index] {
Failf("invalid list of ports for uid %v. Got %v, expected %v", podUID, endpoints[podUID], expectedEndpoints[podUID])
}
}
}
}
func ValidateEndpointsOrFail(c clientset.Interface, namespace, serviceName string, expectedEndpoints PortsByPodName) {
By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", ServiceStartTimeout, serviceName, namespace, expectedEndpoints))
i := 1
for start := time.Now(); time.Since(start) < ServiceStartTimeout; time.Sleep(1 * time.Second) {
endpoints, err := c.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil {
Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err)
continue
}
// Logf("Found endpoints %v", endpoints)
portsByPodUID := GetContainerPortsByPodUID(endpoints)
// Logf("Found port by pod UID %v", portsByPodUID)
expectedPortsByPodUID := translatePodNameToUIDOrFail(c, namespace, expectedEndpoints)
if len(portsByPodUID) == len(expectedEndpoints) {
validatePortsOrFail(portsByPodUID, expectedPortsByPodUID)
Logf("successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)",
serviceName, namespace, expectedEndpoints, time.Since(start))
return
}
if i%5 == 0 {
Logf("Unexpected endpoints: found %v, expected %v (%v elapsed, will retry)", portsByPodUID, expectedEndpoints, time.Since(start))
}
i++
}
if pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}); err == nil {
for _, pod := range pods.Items {
Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
}
} else {
Logf("Can't list pod debug info: %v", err)
}
Failf("Timed out waiting for service %s in namespace %s to expose endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, ServiceStartTimeout)
}
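For instance (pod names are placeholders), the expected endpoints are passed as a PortsByPodName map keyed by pod name:
// Hypothetical sketch: assert that two pods back the service on port 80.
expected := PortsByPodName{
	"pod-a": {80},
	"pod-b": {80},
}
ValidateEndpointsOrFail(f.ClientSet, f.Namespace.Name, "test-svc", expected)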
// StartServeHostnameService creates a replication controller that serves its
// hostname and a service on top of it.
func StartServeHostnameService(c clientset.Interface, internalClient internalclientset.Interface, svc *v1.Service, ns string, replicas int) ([]string, string, error) {
func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string, replicas int) ([]string, string, error) {
podNames := make([]string, replicas)
name := svc.ObjectMeta.Name
By("creating service " + name + " in namespace " + ns)
ginkgo.By("creating service " + name + " in namespace " + ns)
_, err := c.CoreV1().Services(ns).Create(svc)
if err != nil {
return podNames, "", err
@ -1334,7 +1253,6 @@ func StartServeHostnameService(c clientset.Interface, internalClient internalcli
maxContainerFailures := 0
config := testutils.RCConfig{
Client: c,
InternalClient: internalClient,
Image: ServeHostnameImage,
Name: name,
Namespace: ns,
@ -1350,7 +1268,7 @@ func StartServeHostnameService(c clientset.Interface, internalClient internalcli
}
if len(createdPods) != replicas {
return podNames, "", fmt.Errorf("Incorrect number of running pods: %v", len(createdPods))
return podNames, "", fmt.Errorf("incorrect number of running pods: %v", len(createdPods))
}
for i := range createdPods {
@ -1363,12 +1281,13 @@ func StartServeHostnameService(c clientset.Interface, internalClient internalcli
return podNames, "", err
}
if service.Spec.ClusterIP == "" {
return podNames, "", fmt.Errorf("Service IP is blank for %v", name)
return podNames, "", fmt.Errorf("service IP is blank for %v", name)
}
serviceIP := service.Spec.ClusterIP
return podNames, serviceIP, nil
}
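With the internalClient parameter removed above, a caller sketch now looks like this (spec details and counts are illustrative; imports elided):
// Hypothetical sketch using the new signature (no internalclientset required).
svc := CreateServiceSpec("hostnames", "", false, map[string]string{"name": "hostnames"})
podNames, serviceIP, err := StartServeHostnameService(f.ClientSet, svc, f.Namespace.Name, 3)
ExpectNoError(err)
defer StopServeHostnameService(f.ClientSet, f.Namespace.Name, "hostnames")
e2elog.Logf("pods %v serving behind %s", podNames, serviceIP)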
// StopServeHostnameService stops the given service.
func StopServeHostnameService(clientset clientset.Interface, ns, name string) error {
if err := DeleteRCAndWaitForGC(clientset, ns, name); err != nil {
return err
@ -1400,29 +1319,29 @@ func VerifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expect
// verify service from node
func() string {
cmd := "set -e; " + buildCommand("wget -q --timeout=0.2 --tries=1 -O -")
Logf("Executing cmd %q on host %v", cmd, host)
result, err := SSH(cmd, host, TestContext.Provider)
e2elog.Logf("Executing cmd %q on host %v", cmd, host)
result, err := e2essh.SSH(cmd, host, TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
Logf("error while SSH-ing to node: %v", err)
e2essh.LogResult(result)
e2elog.Logf("error while SSH-ing to node: %v", err)
}
return result.Stdout
},
// verify service from pod
func() string {
cmd := buildCommand("wget -q -T 1 -O -")
Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPodName)
e2elog.Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPodName)
// TODO: Use exec-over-http via the netexec pod instead of kubectl exec.
output, err := RunHostCmd(ns, execPodName, cmd)
if err != nil {
Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPodName, err, output)
e2elog.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPodName, err, output)
}
return output
},
}
expectedEndpoints := sets.NewString(expectedPods...)
By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods)))
ginkgo.By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods)))
for _, cmdFunc := range commands {
passed := false
gotEndpoints := sets.NewString()
@ -1441,12 +1360,12 @@ func VerifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expect
// and we need a better way to track how often it occurs.
if gotEndpoints.IsSuperset(expectedEndpoints) {
if !gotEndpoints.Equal(expectedEndpoints) {
Logf("Ignoring unexpected output wgetting endpoints of service %s: %v", serviceIP, gotEndpoints.Difference(expectedEndpoints))
e2elog.Logf("Ignoring unexpected output wgetting endpoints of service %s: %v", serviceIP, gotEndpoints.Difference(expectedEndpoints))
}
passed = true
break
}
Logf("Unable to reach the following endpoints of service %s: %v", serviceIP, expectedEndpoints.Difference(gotEndpoints))
e2elog.Logf("Unable to reach the following endpoints of service %s: %v", serviceIP, expectedEndpoints.Difference(gotEndpoints))
}
if !passed {
// Sort the lists so they're easier to visually diff.
@ -1460,6 +1379,7 @@ func VerifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expect
return nil
}
// VerifyServeHostnameServiceDown verifies that the given service isn't served.
func VerifyServeHostnameServiceDown(c clientset.Interface, host string, serviceIP string, servicePort int) error {
ipPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
// The current versions of curl included in CentOS and RHEL distros
@ -1469,30 +1389,33 @@ func VerifyServeHostnameServiceDown(c clientset.Interface, host string, serviceI
"curl -g -s --connect-timeout 2 http://%s && exit 99", ipPort)
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
result, err := SSH(command, host, TestContext.Provider)
result, err := e2essh.SSH(command, host, TestContext.Provider)
if err != nil {
LogSSHResult(result)
Logf("error while SSH-ing to node: %v", err)
e2essh.LogResult(result)
e2elog.Logf("error while SSH-ing to node: %v", err)
}
if result.Code != 99 {
return nil
}
Logf("service still alive - still waiting")
e2elog.Logf("service still alive - still waiting")
}
return fmt.Errorf("waiting for service to be down timed out")
}
// CleanupServiceResources cleans up service Type=LoadBalancer resources.
func CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) {
TestContext.CloudConfig.Provider.CleanupServiceResources(c, loadBalancerName, region, zone)
}
// DescribeSvc logs the output of kubectl describe svc for the given namespace
func DescribeSvc(ns string) {
Logf("\nOutput of kubectl describe svc:\n")
e2elog.Logf("\nOutput of kubectl describe svc:\n")
desc, _ := RunKubectl(
"describe", "svc", fmt.Sprintf("--namespace=%v", ns))
Logf(desc)
e2elog.Logf(desc)
}
// CreateServiceSpec returns a Service object for testing.
func CreateServiceSpec(serviceName, externalName string, isHeadless bool, selector map[string]string) *v1.Service {
headlessService := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
@ -1522,6 +1445,7 @@ func EnableAndDisableInternalLB() (enable func(svc *v1.Service), disable func(sv
return TestContext.CloudConfig.Provider.EnableAndDisableInternalLB()
}
// GetServiceLoadBalancerCreationTimeout returns a timeout value for creating a load balancer of a service.
func GetServiceLoadBalancerCreationTimeout(cs clientset.Interface) time.Duration {
if nodes := GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > LargeClusterMinNodesNumber {
return LoadBalancerCreateTimeoutLarge
@ -1537,7 +1461,7 @@ type affinityTracker struct {
// Record the response going to a given host.
func (at *affinityTracker) recordHost(host string) {
at.hostTrace = append(at.hostTrace, host)
Logf("Received response from host: %s", host)
e2elog.Logf("Received response from host: %s", host)
}
// Check that we got a constant count of requests going to the same host.
@ -1560,7 +1484,7 @@ func (at *affinityTracker) checkHostTrace(count int) (fulfilled, affinityHolds b
}
func checkAffinityFailed(tracker affinityTracker, err string) {
Logf("%v", tracker.hostTrace)
e2elog.Logf("%v", tracker.hostTrace)
Failf(err)
}
@ -1569,9 +1493,9 @@ func checkAffinityFailed(tracker affinityTracker, err string) {
// number of same responses observed in a row. If affinity is not expected, the
// test will keep observing until different responses are observed. The function
// returns false only in case of unexpected errors.
func CheckAffinity(jig *ServiceTestJig, execPod *v1.Pod, targetIp string, targetPort int, shouldHold bool) bool {
targetIpPort := net.JoinHostPort(targetIp, strconv.Itoa(targetPort))
cmd := fmt.Sprintf(`wget -qO- http://%s/ -T 2`, targetIpPort)
func CheckAffinity(jig *ServiceTestJig, execPod *v1.Pod, targetIP string, targetPort int, shouldHold bool) bool {
targetIPPort := net.JoinHostPort(targetIP, strconv.Itoa(targetPort))
cmd := fmt.Sprintf(`wget -qO- http://%s/ -T 2`, targetIPPort)
timeout := ServiceTestTimeout
if execPod == nil {
timeout = LoadBalancerPollTimeout
@ -1579,14 +1503,14 @@ func CheckAffinity(jig *ServiceTestJig, execPod *v1.Pod, targetIp string, target
var tracker affinityTracker
if pollErr := wait.PollImmediate(Poll, timeout, func() (bool, error) {
if execPod != nil {
if stdout, err := RunHostCmd(execPod.Namespace, execPod.Name, cmd); err != nil {
Logf("Failed to get response from %s. Retry until timeout", targetIpPort)
stdout, err := RunHostCmd(execPod.Namespace, execPod.Name, cmd)
if err != nil {
e2elog.Logf("Failed to get response from %s. Retry until timeout", targetIPPort)
return false, nil
} else {
tracker.recordHost(stdout)
}
tracker.recordHost(stdout)
} else {
rawResponse := jig.GetHTTPContent(targetIp, targetPort, timeout, "")
rawResponse := jig.GetHTTPContent(targetIP, targetPort, timeout, "")
tracker.recordHost(rawResponse.String())
}
trackerFulfilled, affinityHolds := tracker.checkHostTrace(AffinityConfirmCount)
@ -1602,17 +1526,16 @@ func CheckAffinity(jig *ServiceTestJig, execPod *v1.Pod, targetIp string, target
if pollErr != wait.ErrWaitTimeout {
checkAffinityFailed(tracker, pollErr.Error())
return false
} else {
if !trackerFulfilled {
checkAffinityFailed(tracker, fmt.Sprintf("Connection to %s timed out or not enough responses.", targetIpPort))
}
if shouldHold {
checkAffinityFailed(tracker, "Affinity should hold but didn't.")
} else {
checkAffinityFailed(tracker, "Affinity shouldn't hold but did.")
}
return true
}
if !trackerFulfilled {
checkAffinityFailed(tracker, fmt.Sprintf("Connection to %s timed out or not enough responses.", targetIPPort))
}
if shouldHold {
checkAffinityFailed(tracker, "Affinity should hold but didn't.")
} else {
checkAffinityFailed(tracker, "Affinity shouldn't hold but did.")
}
return true
}
return true
}
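A usage sketch (execPod may be nil, in which case the jig polls the target over HTTP directly, as the code above shows):
// Hypothetical sketch: assert session affinity holds against a ClusterIP service.
// execPod is assumed to be a helper pod created by the test for running wget.
if !CheckAffinity(jig, execPod, svc.Spec.ClusterIP, 80, true) {
	Failf("unexpected error while checking session affinity")
}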

View File

@ -19,6 +19,8 @@ package framework
import (
"fmt"
"time"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)
const (
@ -26,6 +28,7 @@ const (
resizeNodeNotReadyTimeout = 2 * time.Minute
)
// ResizeGroup resizes an instance group
func ResizeGroup(group string, size int32) error {
if TestContext.ReportDir != "" {
CoreDump(TestContext.ReportDir)
@ -34,27 +37,30 @@ func ResizeGroup(group string, size int32) error {
return TestContext.CloudConfig.Provider.ResizeGroup(group, size)
}
// GetGroupNodes returns the node names for the specified node group
func GetGroupNodes(group string) ([]string, error) {
return TestContext.CloudConfig.Provider.GetGroupNodes(group)
}
// GroupSize returns the size of an instance group
func GroupSize(group string) (int, error) {
return TestContext.CloudConfig.Provider.GroupSize(group)
}
// WaitForGroupSize waits for a node instance group to reach the desired size
func WaitForGroupSize(group string, size int32) error {
timeout := 30 * time.Minute
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
currentSize, err := GroupSize(group)
if err != nil {
Logf("Failed to get node instance group size: %v", err)
e2elog.Logf("Failed to get node instance group size: %v", err)
continue
}
if currentSize != int(size) {
Logf("Waiting for node instance group size %d, current size %d", size, currentSize)
e2elog.Logf("Waiting for node instance group size %d, current size %d", size, currentSize)
continue
}
Logf("Node instance group has reached the desired size %d", size)
e2elog.Logf("Node instance group has reached the desired size %d", size)
return nil
}
return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size)

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
package ssh
import (
"bytes"
@ -24,11 +24,29 @@ import (
"path/filepath"
"time"
"github.com/onsi/gomega"
"golang.org/x/crypto/ssh"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
sshutil "k8s.io/kubernetes/pkg/ssh"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
)
const (
// ssh port
sshPort = "22"
// pollNodeInterval is how often to poll for nodes.
pollNodeInterval = 2 * time.Second
// singleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
// transient failures from failing tests.
// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
singleCallTimeout = 5 * time.Minute
)
// GetSigner returns an ssh.Signer for the provider ("gce", etc.) that can be
@ -86,15 +104,15 @@ func GetSigner(provider string) (ssh.Signer, error) {
func NodeSSHHosts(c clientset.Interface) ([]string, error) {
nodelist := waitListSchedulableNodesOrDie(c)
hosts := NodeAddresses(nodelist, v1.NodeExternalIP)
hosts := nodeAddresses(nodelist, v1.NodeExternalIP)
// If ExternalIPs aren't set, assume the test programs can reach the
// InternalIP. Simplified exception logic here assumes that the hosts will
// either all have ExternalIP or none will. Simplifies handling here and
// should be adequate since the setting of the external IPs is provider
// specific: they should either all have them or none of them will.
if len(hosts) == 0 {
Logf("No external IP address on nodes, falling back to internal IPs")
hosts = NodeAddresses(nodelist, v1.NodeInternalIP)
e2elog.Logf("No external IP address on nodes, falling back to internal IPs")
hosts = nodeAddresses(nodelist, v1.NodeInternalIP)
}
// Error if any node didn't have an external/internal IP.
@ -111,7 +129,8 @@ func NodeSSHHosts(c clientset.Interface) ([]string, error) {
return sshHosts, nil
}
type SSHResult struct {
// Result holds the execution result of an SSH command
type Result struct {
User string
Host string
Cmd string
@ -123,15 +142,15 @@ type SSHResult struct {
// NodeExec execs the given cmd on node via SSH. Note that the nodeName is an sshable name,
// e.g. the name returned by framework.GetMasterHost(). This is also not guaranteed to work across
// cloud providers since it involves ssh.
func NodeExec(nodeName, cmd string) (SSHResult, error) {
return SSH(cmd, net.JoinHostPort(nodeName, sshPort), TestContext.Provider)
func NodeExec(nodeName, cmd, provider string) (Result, error) {
return SSH(cmd, net.JoinHostPort(nodeName, sshPort), provider)
}
// SSH synchronously SSHs to a node running on provider and runs cmd. If there
// is no error performing the SSH, the stdout, stderr, and exit code are
// returned.
func SSH(cmd, host, provider string) (SSHResult, error) {
result := SSHResult{Host: host, Cmd: cmd}
func SSH(cmd, host, provider string) (Result, error) {
result := Result{Host: host, Cmd: cmd}
// Get a signer for the provider.
signer, err := GetSigner(provider)
@ -230,19 +249,21 @@ func RunSSHCommandViaBastion(cmd, user, bastion, host string, signer ssh.Signer)
return bout.String(), berr.String(), code, err
}
func LogSSHResult(result SSHResult) {
// LogResult logs the given SSH command result
func LogResult(result Result) {
remote := fmt.Sprintf("%s@%s", result.User, result.Host)
Logf("ssh %s: command: %s", remote, result.Cmd)
Logf("ssh %s: stdout: %q", remote, result.Stdout)
Logf("ssh %s: stderr: %q", remote, result.Stderr)
Logf("ssh %s: exit code: %d", remote, result.Code)
e2elog.Logf("ssh %s: command: %s", remote, result.Cmd)
e2elog.Logf("ssh %s: stdout: %q", remote, result.Stdout)
e2elog.Logf("ssh %s: stderr: %q", remote, result.Stderr)
e2elog.Logf("ssh %s: exit code: %d", remote, result.Code)
}
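After the rename, a typical call site from the framework package pairs SSH with LogResult, mirroring the updated callers earlier in this commit (host is assumed to be an "addr:port" string):
// Hypothetical call-site sketch from a consuming package.
result, err := e2essh.SSH("uptime", host, TestContext.Provider)
if err != nil || result.Code != 0 {
	e2essh.LogResult(result)
	e2elog.Logf("error while SSH-ing to node: %v", err)
}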
func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*SSHResult, error) {
Logf("Getting external IP address for %s", node.Name)
// IssueSSHCommandWithResult tries to execute an SSH command and returns the execution result
func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, error) {
e2elog.Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP {
if a.Type == v1.NodeExternalIP && a.Address != "" {
host = net.JoinHostPort(a.Address, sshPort)
break
}
@ -251,7 +272,7 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*SSHResult,
if host == "" {
// No external IPs were found, let's try to use internal as plan B
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeInternalIP {
if a.Type == v1.NodeInternalIP && a.Address != "" {
host = net.JoinHostPort(a.Address, sshPort)
break
}
@ -262,9 +283,9 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*SSHResult,
return nil, fmt.Errorf("couldn't find any IP address for node %s", node.Name)
}
Logf("SSH %q on %s(%s)", cmd, node.Name, host)
e2elog.Logf("SSH %q on %s(%s)", cmd, node.Name, host)
result, err := SSH(cmd, host, provider)
LogSSHResult(result)
LogResult(result)
if result.Code != 0 || err != nil {
return nil, fmt.Errorf("failed running %q: %v (exit code %d, stderr %v)",
@ -274,6 +295,7 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*SSHResult,
return &result, nil
}
// IssueSSHCommand tries to execute an SSH command
func IssueSSHCommand(cmd, provider string, node *v1.Node) error {
_, err := IssueSSHCommandWithResult(cmd, provider, node)
if err != nil {
@ -281,3 +303,61 @@ func IssueSSHCommand(cmd, provider string, node *v1.Node) error {
}
return nil
}
// nodeAddresses returns the first address of the given type of each node.
func nodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string {
hosts := []string{}
for _, n := range nodelist.Items {
for _, addr := range n.Status.Addresses {
if addr.Type == addrType && addr.Address != "" {
hosts = append(hosts, addr.Address)
break
}
}
}
return hosts
}
// waitListSchedulableNodes is a wrapper around listing nodes supporting retries.
func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
var nodes *v1.NodeList
var err error
if wait.PollImmediate(pollNodeInterval, singleCallTimeout, func() (bool, error) {
nodes, err = c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
return true, nil
}) != nil {
return nodes, err
}
return nodes, nil
}
// waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
func waitListSchedulableNodesOrDie(c clientset.Interface) *v1.NodeList {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
expectNoError(err, "Non-retryable failure or timed out while listing nodes for e2e cluster.")
}
return nodes
}
// expectNoError checks if "err" is set, and if so, fails assertion while logging the error.
func expectNoError(err error, explain ...interface{}) {
expectNoErrorWithOffset(1, err, explain...)
}
// expectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller
// (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f").
func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
if err != nil {
e2elog.Logf("Unexpected error occurred: %v", err)
}
gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}
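To make the offset semantics concrete, a hypothetical wrapper in this package would blame its own caller like so:
// Hypothetical sketch: offset 1 attributes the failure to mustListNodes's caller.
func mustListNodes(c clientset.Interface) *v1.NodeList {
	nodes, err := waitListSchedulableNodes(c)
	expectNoErrorWithOffset(1, err, "listing schedulable nodes")
	return nodes
}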

View File

@ -38,16 +38,17 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/manifest"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
// Poll interval for StatefulSet tests
// StatefulSetPoll is a poll interval for StatefulSet tests
StatefulSetPoll = 10 * time.Second
// Timeout interval for StatefulSet operations
// StatefulSetTimeout is a timeout interval for StatefulSet operations
StatefulSetTimeout = 10 * time.Minute
// Timeout for stateful pods to change state
// StatefulPodTimeout is a timeout for stateful pods to change state
StatefulPodTimeout = 5 * time.Minute
)
@ -94,18 +95,18 @@ func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *apps.Sta
return filepath.Join(manifestPath, file)
}
Logf("Parsing statefulset from %v", mkpath("statefulset.yaml"))
e2elog.Logf("Parsing statefulset from %v", mkpath("statefulset.yaml"))
ss, err := manifest.StatefulSetFromManifest(mkpath("statefulset.yaml"), ns)
ExpectNoError(err)
Logf("Parsing service from %v", mkpath("service.yaml"))
e2elog.Logf("Parsing service from %v", mkpath("service.yaml"))
svc, err := manifest.SvcFromManifest(mkpath("service.yaml"))
ExpectNoError(err)
Logf(fmt.Sprintf("creating " + ss.Name + " service"))
e2elog.Logf(fmt.Sprintf("creating " + ss.Name + " service"))
_, err = s.c.CoreV1().Services(ns).Create(svc)
ExpectNoError(err)
Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector))
e2elog.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector))
_, err = s.c.AppsV1().StatefulSets(ns).Create(ss)
ExpectNoError(err)
s.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
@ -134,7 +135,7 @@ func (s *StatefulSetTester) ExecInStatefulPods(ss *apps.StatefulSet, cmd string)
podList := s.GetPodList(ss)
for _, statefulPod := range podList.Items {
stdout, err := RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout)
e2elog.Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout)
if err != nil {
return err
}
@ -162,9 +163,9 @@ func (s *StatefulSetTester) CheckHostname(ss *apps.StatefulSet) error {
func (s *StatefulSetTester) Saturate(ss *apps.StatefulSet) {
var i int32
for i = 0; i < *(ss.Spec.Replicas); i++ {
Logf("Waiting for stateful pod at index %v to enter Running", i)
e2elog.Logf("Waiting for stateful pod at index %v to enter Running", i)
s.WaitForRunning(i+1, i, ss)
Logf("Resuming stateful pod at index %v", i)
e2elog.Logf("Resuming stateful pod at index %v", i)
s.ResumeNextPod(ss)
}
}
@ -200,7 +201,7 @@ func (s *StatefulSetTester) Scale(ss *apps.StatefulSet, count int32) (*apps.Stat
name := ss.Name
ns := ss.Namespace
Logf("Scaling statefulset %s to %d", name, count)
e2elog.Logf("Scaling statefulset %s to %d", name, count)
ss = s.update(ns, name, func(ss *apps.StatefulSet) { *(ss.Spec.Replicas) = count })
var statefulPodList *v1.PodList
@ -282,12 +283,12 @@ func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *apps.Stateful
if hard {
Failf("StatefulSet %v scaled unexpectedly scaled to %d -> %d replicas", ss.Name, count, len(podList.Items))
} else {
Logf("StatefulSet %v has not reached scale %d, at %d", ss.Name, count, statefulPodCount)
e2elog.Logf("StatefulSet %v has not reached scale %d, at %d", ss.Name, count, statefulPodCount)
}
time.Sleep(1 * time.Second)
continue
}
Logf("Verifying statefulset %v doesn't scale past %d for another %+v", ss.Name, count, deadline.Sub(t))
e2elog.Logf("Verifying statefulset %v doesn't scale past %d for another %+v", ss.Name, count, deadline.Sub(t))
time.Sleep(1 * time.Second)
}
}
@ -300,7 +301,7 @@ func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, s
podList := s.GetPodList(ss)
s.SortStatefulPods(podList)
if int32(len(podList.Items)) < numPodsRunning {
Logf("Found %d stateful pods, waiting for %d", len(podList.Items), numPodsRunning)
e2elog.Logf("Found %d stateful pods, waiting for %d", len(podList.Items), numPodsRunning)
return false, nil
}
if int32(len(podList.Items)) > numPodsRunning {
@ -310,7 +311,7 @@ func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, s
shouldBeReady := getStatefulPodOrdinal(&p) < int(numPodsReady)
isReady := podutil.IsPodReady(&p)
desiredReadiness := shouldBeReady == isReady
Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, v1.PodRunning, shouldBeReady, p.Status.Phase, isReady)
e2elog.Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, v1.PodRunning, shouldBeReady, p.Status.Phase, isReady)
if p.Status.Phase != v1.PodRunning || !desiredReadiness {
return false, nil
}
@ -407,14 +408,14 @@ func (s *StatefulSetTester) WaitForRollingUpdate(set *apps.StatefulSet) (*apps.S
return false, nil
}
if set.Status.UpdateRevision != set.Status.CurrentRevision {
Logf("Waiting for StatefulSet %s/%s to complete update",
e2elog.Logf("Waiting for StatefulSet %s/%s to complete update",
set.Namespace,
set.Name,
)
s.SortStatefulPods(pods)
for i := range pods.Items {
if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
e2elog.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
set.Status.UpdateRevision,
@ -453,14 +454,14 @@ func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *apps.StatefulSe
return false, nil
}
if partition <= 0 && set.Status.UpdateRevision != set.Status.CurrentRevision {
Logf("Waiting for StatefulSet %s/%s to complete update",
e2elog.Logf("Waiting for StatefulSet %s/%s to complete update",
set.Namespace,
set.Name,
)
s.SortStatefulPods(pods)
for i := range pods.Items {
if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
e2elog.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
set.Status.UpdateRevision,
@ -468,16 +469,15 @@ func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *apps.StatefulSe
}
}
return false, nil
} else {
for i := int(*set.Spec.Replicas) - 1; i >= partition; i-- {
if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
set.Status.UpdateRevision,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel])
return false, nil
}
}
for i := int(*set.Spec.Replicas) - 1; i >= partition; i-- {
if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
e2elog.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
set.Status.UpdateRevision,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel])
return false, nil
}
}
return true, nil
@ -485,7 +485,7 @@ func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *apps.StatefulSe
return set, pods
}
// WaitForRunningAndReady waits for numStatefulPods in ss to be Running and not Ready.
// WaitForRunningAndNotReady waits for numStatefulPods in ss to be Running and not Ready.
func (s *StatefulSetTester) WaitForRunningAndNotReady(numStatefulPods int32, ss *apps.StatefulSet) {
s.WaitForRunning(numStatefulPods, 0, ss)
}
@ -502,15 +502,15 @@ var httpProbe = &v1.Probe{
FailureThreshold: 1,
}
// SetHttpProbe sets the pod template's ReadinessProbe for Nginx StatefulSet containers.
// This probe can then be controlled with BreakHttpProbe() and RestoreHttpProbe().
// SetHTTPProbe sets the pod template's ReadinessProbe for Nginx StatefulSet containers.
// This probe can then be controlled with BreakHTTPProbe() and RestoreHTTPProbe().
// Note that this cannot be used together with PauseNewPods().
func (s *StatefulSetTester) SetHttpProbe(ss *apps.StatefulSet) {
func (s *StatefulSetTester) SetHTTPProbe(ss *apps.StatefulSet) {
ss.Spec.Template.Spec.Containers[0].ReadinessProbe = httpProbe
}
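The intended break/restore cycle looks roughly like this (sst and ss are assumed to come from the surrounding test):
// Hypothetical sketch of the renamed probe helpers.
sst.SetHTTPProbe(ss)                  // configure before the StatefulSet is created
ExpectNoError(sst.BreakHTTPProbe(ss)) // all pods start failing readiness
sst.WaitForRunningAndNotReady(*ss.Spec.Replicas, ss)
ExpectNoError(sst.RestoreHTTPProbe(ss))
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)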
// BreakHttpProbe breaks the readiness probe for Nginx StatefulSet containers in ss.
func (s *StatefulSetTester) BreakHttpProbe(ss *apps.StatefulSet) error {
// BreakHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in ss.
func (s *StatefulSetTester) BreakHTTPProbe(ss *apps.StatefulSet) error {
path := httpProbe.HTTPGet.Path
if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path)
@ -520,8 +520,8 @@ func (s *StatefulSetTester) BreakHttpProbe(ss *apps.StatefulSet) error {
return s.ExecInStatefulPods(ss, cmd)
}
// BreakPodHttpProbe breaks the readiness probe for Nginx StatefulSet containers in one pod.
func (s *StatefulSetTester) BreakPodHttpProbe(ss *apps.StatefulSet, pod *v1.Pod) error {
// BreakPodHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in one pod.
func (s *StatefulSetTester) BreakPodHTTPProbe(ss *apps.StatefulSet, pod *v1.Pod) error {
path := httpProbe.HTTPGet.Path
if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path)
@ -529,12 +529,12 @@ func (s *StatefulSetTester) BreakPodHttpProbe(ss *apps.StatefulSet, pod *v1.Pod)
// Ignore 'mv' errors to make this idempotent.
cmd := fmt.Sprintf("mv -v /usr/share/nginx/html%v /tmp/ || true", path)
stdout, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
e2elog.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
return err
}
// RestoreHttpProbe restores the readiness probe for Nginx StatefulSet containers in ss.
func (s *StatefulSetTester) RestoreHttpProbe(ss *apps.StatefulSet) error {
// RestoreHTTPProbe restores the readiness probe for Nginx StatefulSet containers in ss.
func (s *StatefulSetTester) RestoreHTTPProbe(ss *apps.StatefulSet) error {
path := httpProbe.HTTPGet.Path
if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path)
@ -544,8 +544,8 @@ func (s *StatefulSetTester) RestoreHttpProbe(ss *apps.StatefulSet) error {
return s.ExecInStatefulPods(ss, cmd)
}
// RestorePodHttpProbe restores the readiness probe for Nginx StatefulSet containers in pod.
func (s *StatefulSetTester) RestorePodHttpProbe(ss *apps.StatefulSet, pod *v1.Pod) error {
// RestorePodHTTPProbe restores the readiness probe for Nginx StatefulSet containers in pod.
func (s *StatefulSetTester) RestorePodHTTPProbe(ss *apps.StatefulSet, pod *v1.Pod) error {
path := httpProbe.HTTPGet.Path
if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path)
@ -553,7 +553,7 @@ func (s *StatefulSetTester) RestorePodHttpProbe(ss *apps.StatefulSet, pod *v1.Po
// Ignore 'mv' errors to make this idempotent.
cmd := fmt.Sprintf("mv -v /tmp%v /usr/share/nginx/html/ || true", path)
stdout, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
e2elog.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
return err
}
@ -574,7 +574,7 @@ func hasPauseProbe(pod *v1.Pod) bool {
// PauseNewPods adds an always-failing ReadinessProbe to the StatefulSet PodTemplate.
// This causes all newly-created Pods to stay Unready until they are manually resumed
// with ResumeNextPod().
// Note that this cannot be used together with SetHttpProbe().
// Note that this cannot be used together with SetHTTPProbe().
func (s *StatefulSetTester) PauseNewPods(ss *apps.StatefulSet) {
ss.Spec.Template.Spec.Containers[0].ReadinessProbe = pauseProbe
}
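Typical flow (a sketch; c, ns, sst, and ss are assumed from the surrounding test): gate pod startup at creation time, then release ordinals one at a time; Saturate() above automates exactly this loop.
// Hypothetical sketch: gate pod startup, then release pods one by one.
sst.PauseNewPods(ss) // must be set before the StatefulSet is created
_, err := c.AppsV1().StatefulSets(ns).Create(ss)
ExpectNoError(err)
sst.WaitForRunning(1, 0, ss) // pod-0 Running but not Ready
sst.ResumeNextPod(ss)        // unblock pod-0's readiness gate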
@ -599,14 +599,14 @@ func (s *StatefulSetTester) ResumeNextPod(ss *apps.StatefulSet) {
}
_, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, "dd if=/dev/zero of=/data/statefulset-continue bs=1 count=1 conv=fsync", StatefulSetPoll, StatefulPodTimeout)
ExpectNoError(err)
Logf("Resumed pod %v", pod.Name)
e2elog.Logf("Resumed pod %v", pod.Name)
resumedPod = pod.Name
}
}
// WaitForStatusReadyReplicas waits for the ss.Status.ReadyReplicas to be equal to expectedReplicas
func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *apps.StatefulSet, expectedReplicas int32) {
Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
e2elog.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
ns, name := ss.Namespace, ss.Name
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
@ -619,7 +619,7 @@ func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *apps.StatefulSet, exp
return false, nil
}
if ssGet.Status.ReadyReplicas != expectedReplicas {
Logf("Waiting for stateful set status.readyReplicas to become %d, currently %d", expectedReplicas, ssGet.Status.ReadyReplicas)
e2elog.Logf("Waiting for stateful set status.readyReplicas to become %d, currently %d", expectedReplicas, ssGet.Status.ReadyReplicas)
return false, nil
}
return true, nil
@ -631,7 +631,7 @@ func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *apps.StatefulSet, exp
// WaitForStatusReplicas waits for the ss.Status.Replicas to be equal to expectedReplicas
func (s *StatefulSetTester) WaitForStatusReplicas(ss *apps.StatefulSet, expectedReplicas int32) {
Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
e2elog.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
ns, name := ss.Namespace, ss.Name
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
@ -644,7 +644,7 @@ func (s *StatefulSetTester) WaitForStatusReplicas(ss *apps.StatefulSet, expected
return false, nil
}
if ssGet.Status.Replicas != expectedReplicas {
Logf("Waiting for stateful set status.replicas to become %d, currently %d", expectedReplicas, ssGet.Status.Replicas)
e2elog.Logf("Waiting for stateful set status.replicas to become %d, currently %d", expectedReplicas, ssGet.Status.Replicas)
return false, nil
}
return true, nil
@ -655,8 +655,8 @@ func (s *StatefulSetTester) WaitForStatusReplicas(ss *apps.StatefulSet, expected
}
// CheckServiceName asserts that the ServiceName for ss is equivalent to expectedServiceName.
func (p *StatefulSetTester) CheckServiceName(ss *apps.StatefulSet, expectedServiceName string) error {
Logf("Checking if statefulset spec.serviceName is %s", expectedServiceName)
func (s *StatefulSetTester) CheckServiceName(ss *apps.StatefulSet, expectedServiceName string) error {
e2elog.Logf("Checking if statefulset spec.serviceName is %s", expectedServiceName)
if expectedServiceName != ss.Spec.ServiceName {
return fmt.Errorf("Wrong service name governing statefulset. Expected %s got %s",
@ -687,7 +687,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
errList = append(errList, fmt.Sprintf("%v", err))
}
sst.WaitForStatusReplicas(ss, 0)
Logf("Deleting statefulset %v", ss.Name)
e2elog.Logf("Deleting statefulset %v", ss.Name)
// Use OrphanDependents=false so it's deleted synchronously.
// We already made sure the Pods are gone inside Scale().
if err := c.AppsV1().StatefulSets(ss.Namespace).Delete(ss.Name, &metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil {
@ -701,13 +701,13 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
pvcPollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
Logf("WARNING: Failed to list pvcs, retrying %v", err)
e2elog.Logf("WARNING: Failed to list pvcs, retrying %v", err)
return false, nil
}
for _, pvc := range pvcList.Items {
pvNames.Insert(pvc.Spec.VolumeName)
// TODO: Double check that there are no pods referencing the pvc
Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
e2elog.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvc.Name, nil); err != nil {
return false, nil
}
@ -721,7 +721,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
pvList, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
Logf("WARNING: Failed to list pvs, retrying %v", err)
e2elog.Logf("WARNING: Failed to list pvs, retrying %v", err)
return false, nil
}
waitingFor := []string{}
@ -733,7 +733,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
if len(waitingFor) == 0 {
return true, nil
}
Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n"))
e2elog.Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n"))
return false, nil
})
if pollErr != nil {
@ -807,9 +807,10 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.Nginx),
VolumeMounts: mounts,
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.Nginx),
VolumeMounts: mounts,
SecurityContext: &v1.SecurityContext{},
},
},
Volumes: vols,
@ -869,6 +870,7 @@ func (sp statefulPodsByOrdinal) Less(i, j int) bool {
type updateStatefulSetFunc func(*apps.StatefulSet)
// UpdateStatefulSetWithRetries updates statefulset template with retries.
func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *apps.StatefulSet, err error) {
statefulSets := c.AppsV1().StatefulSets(namespace)
var updateErr error
@ -879,7 +881,7 @@ func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string,
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(statefulSet)
if statefulSet, err = statefulSets.Update(statefulSet); err == nil {
Logf("Updating stateful set %s", name)
e2elog.Logf("Updating stateful set %s", name)
return true, nil
}
updateErr = err

View File

@ -33,6 +33,7 @@ import (
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/klog"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)
const (
@ -100,7 +101,6 @@ type TestContextType struct {
SystemPodsStartupTimeout time.Duration
EtcdUpgradeStorage string
EtcdUpgradeVersion string
IngressUpgradeImage string
GCEUpgradeScript string
ContainerRuntime string
ContainerRuntimeEndpoint string
@ -204,8 +204,9 @@ type NodeTestContextType struct {
ExtraEnvs map[string]string
}
// CloudConfig holds the cloud configuration for e2e test suites.
type CloudConfig struct {
ApiEndpoint string
APIEndpoint string
ProjectID string
Zone string // for multizone tests, arbitrarily chosen zone
Region string
@ -225,9 +226,10 @@ type CloudConfig struct {
Provider ProviderInterface
}
// TestContext should be used by all tests to access common context data.
var TestContext TestContextType
// Register flags common to all e2e test suites.
// RegisterCommonFlags registers flags common to all e2e test suites.
func RegisterCommonFlags() {
// Turn on verbose by default to get spec names
config.DefaultReporterConfig.Verbose = true
@ -270,7 +272,7 @@ func RegisterCommonFlags() {
flag.BoolVar(&TestContext.ListImages, "list-images", false, "If true, will show list of images used for running tests.")
}
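// A hypothetical wiring sketch, not part of this diff: a suite's TestMain is
// expected to register these flag sets before parsing and then hand the
// parsed context to AfterReadingAllFlags (assumes the standard flag, os, and
// testing imports).
func testMainSketch(m *testing.M) {
	RegisterCommonFlags()
	RegisterClusterFlags()
	flag.Parse()
	AfterReadingAllFlags(&TestContext)
	os.Exit(m.Run())
}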
// Register flags specific to the cluster e2e test suite.
// RegisterClusterFlags registers flags specific to the cluster e2e test suite.
func RegisterClusterFlags() {
flag.BoolVar(&TestContext.VerifyServiceAccount, "e2e-verify-service-account", true, "If true, tests will verify the service account before running.")
flag.StringVar(&TestContext.KubeConfig, clientcmd.RecommendedConfigPathFlag, os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to kubeconfig containing embedded authinfo.")
@ -294,7 +296,7 @@ func RegisterClusterFlags() {
// TODO: Flags per provider? Rename gce-project/gce-zone?
cloudConfig := &TestContext.CloudConfig
flag.StringVar(&cloudConfig.MasterName, "kube-master", "", "Name of the kubernetes master. Only required if provider is gce or gke")
flag.StringVar(&cloudConfig.ApiEndpoint, "gce-api-endpoint", "", "The GCE APIEndpoint being used, if applicable")
flag.StringVar(&cloudConfig.APIEndpoint, "gce-api-endpoint", "", "The GCE APIEndpoint being used, if applicable")
flag.StringVar(&cloudConfig.ProjectID, "gce-project", "", "The GCE project being used, if applicable")
flag.StringVar(&cloudConfig.Zone, "gce-zone", "", "GCE zone being used, if applicable")
flag.StringVar(&cloudConfig.Region, "gce-region", "", "GCE region being used, if applicable")
@ -316,7 +318,6 @@ func RegisterClusterFlags() {
flag.DurationVar(&TestContext.SystemDaemonsetStartupTimeout, "system-daemonsets-startup-timeout", 5*time.Minute, "Timeout for waiting for all system daemonsets to be ready.")
flag.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
flag.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
flag.StringVar(&TestContext.IngressUpgradeImage, "ingress-upgrade-image", "", "Image to upgrade to if doing an upgrade test for ingress.")
flag.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.")
flag.BoolVar(&TestContext.CleanStart, "clean-start", false, "If true, purge all namespaces except default and system before running tests. This serves to clean up test namespaces from failed/interrupted e2e runs in a long-lived cluster.")
@ -328,7 +329,7 @@ func RegisterClusterFlags() {
flag.DurationVar(&nodeKiller.SimulatedDowntime, "node-killer-simulated-downtime", 10*time.Minute, "A delay between node death and recreation")
}
// Register flags specific to the node e2e test suite.
// RegisterNodeFlags registers flags specific to the node e2e test suite.
func RegisterNodeFlags() {
// Mark the test as node e2e when node flags are registered.
TestContext.NodeE2E = true
@ -421,7 +422,7 @@ func AfterReadingAllFlags(t *TestContextType) {
if TestContext.Provider == "" {
// Some users of the e2e.test binary pass --provider=.
// We need to support that; changing it would break those usages.
Logf("The --provider flag is not set. Continuing as if --provider=skeleton had been used.")
e2elog.Logf("The --provider flag is not set. Continuing as if --provider=skeleton had been used.")
TestContext.Provider = "skeleton"
}

View File

@ -1,105 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"os/exec"
"path"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/version"
clientset "k8s.io/client-go/kubernetes"
)
// RealVersion turns a version constant into a version string deployable on
// GKE. See hack/get-build.sh for more information.
func RealVersion(s string) (string, error) {
Logf("Getting real version for %q", s)
v, _, err := RunCmd(path.Join(TestContext.RepoRoot, "hack/get-build.sh"), "-v", s)
if err != nil {
return v, err
}
Logf("Version for %q is %q", s, v)
return strings.TrimPrefix(strings.TrimSpace(v), "v"), nil
}
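// A hypothetical usage sketch (this file is deleted by the commit): resolve a
// CI alias into a concrete build string, then verify the master runs it.
func checkUpgradeTargetSketch(c clientset.Interface) error {
	v, err := RealVersion("ci/latest")
	if err != nil {
		return err
	}
	return CheckMasterVersion(c, v)
}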
func traceRouteToMaster() {
path, err := exec.LookPath("traceroute")
if err != nil {
Logf("Could not find traceroute program")
return
}
cmd := exec.Command(path, "-I", GetMasterHost())
out, err := cmd.Output()
if len(out) != 0 {
Logf(string(out))
}
if exiterr, ok := err.(*exec.ExitError); err != nil && ok {
Logf("error while running traceroute: %s", exiterr.Stderr)
}
}
func CheckMasterVersion(c clientset.Interface, want string) error {
Logf("Checking master version")
var err error
var v *version.Info
waitErr := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
v, err = c.Discovery().ServerVersion()
if err != nil {
traceRouteToMaster()
return false, nil
}
return true, nil
})
if waitErr != nil {
return fmt.Errorf("CheckMasterVersion() couldn't get the master version: %v", err)
}
// We do prefix trimming and then matching because:
// want looks like: 0.19.3-815-g50e67d4
// got looks like: v0.19.3-815-g50e67d4034e858-dirty
got := strings.TrimPrefix(v.GitVersion, "v")
if !strings.HasPrefix(got, want) {
return fmt.Errorf("master had kube-apiserver version %s which does not start with %s",
got, want)
}
Logf("Master is at version %s", want)
return nil
}
func CheckNodesVersions(cs clientset.Interface, want string) error {
l := GetReadySchedulableNodesOrDie(cs)
for _, n := range l.Items {
// We do prefix trimming and then matching because:
// want looks like: 0.19.3-815-g50e67d4
// kv/kvp look like: v0.19.3-815-g50e67d4034e858-dirty
kv, kpv := strings.TrimPrefix(n.Status.NodeInfo.KubeletVersion, "v"),
strings.TrimPrefix(n.Status.NodeInfo.KubeProxyVersion, "v")
if !strings.HasPrefix(kv, want) {
return fmt.Errorf("node %s had kubelet version %s which does not start with %s",
n.ObjectMeta.Name, kv, want)
}
if !strings.HasPrefix(kpv, want) {
return fmt.Errorf("node %s had kube-proxy version %s which does not start with %s",
n.ObjectMeta.Name, kpv, want)
}
}
return nil
}

File diff suppressed because it is too large

View File

@ -1,651 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This test checks that various VolumeSources are working.
*
* There are two ways, how to test the volumes:
* 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
* The test creates a server pod, exporting a simple 'index.html' file.
* Then it uses an appropriate VolumeSource to import this file into a client pod
* and checks that the pod can see the file. It does so by importing the file
* into the web server root and loading the index.html from it.
*
* These tests work only when privileged containers are allowed, exporting
* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
* other privileged magic in the server pod.
*
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
* 2) With server outside of Kubernetes (Cinder, ...)
* An appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
* the tested Kubernetes cluster. The test itself creates a new volume,
* and checks that Kubernetes can use it as a volume.
*/
package framework
import (
"fmt"
"path/filepath"
"strconv"
"time"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
clientset "k8s.io/client-go/kubernetes"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
Kb int64 = 1000
Mb int64 = 1000 * Kb
Gb int64 = 1000 * Mb
Tb int64 = 1000 * Gb
KiB int64 = 1024
MiB int64 = 1024 * KiB
GiB int64 = 1024 * MiB
TiB int64 = 1024 * GiB
// Waiting period for volume server (Ceph, ...) to initialize itself.
VolumeServerPodStartupTimeout = 3 * time.Minute
// Waiting period for pod to be cleaned up and unmount its volumes so we
// don't tear down containers with NFS/Ceph/Gluster server too early.
PodCleanupTimeout = 20 * time.Second
)
// Configuration of one test. The test consists of:
// - server pod - runs serverImage, exports ports[]
// - client pod - does not need any special configuration
type VolumeTestConfig struct {
Namespace string
// Prefix of all pods. Typically the test name.
Prefix string
// Name of container image for the server pod.
ServerImage string
// Ports to export from the server pod. TCP only.
ServerPorts []int
// Commands to run in the container image.
ServerCmds []string
// Arguments to pass to the container image.
ServerArgs []string
// Volumes needed to be mounted to the server container from the host
// map <host (source) path> -> <container (dst.) path>
// if <host (source) path> is empty, mount a tmpfs emptydir
ServerVolumes map[string]string
// Message to wait for before starting clients
ServerReadyMessage string
// Wait for the pod to terminate successfully
// False indicates that the pod is long running
WaitForCompletion bool
// ServerNodeName is the spec.nodeName to run server pod on. Default is any node.
ServerNodeName string
// ClientNodeName is the spec.nodeName to run client pod on. Default is any node.
ClientNodeName string
// NodeSelector to use in pod spec (server, client and injector pods).
NodeSelector map[string]string
}
// VolumeTest contains a volume to mount into a client pod and its
// expected content.
type VolumeTest struct {
Volume v1.VolumeSource
File string
ExpectedContent string
}
// NFS-specific wrapper for CreateStorageServer.
func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
config = VolumeTestConfig{
Namespace: namespace,
Prefix: "nfs",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeNFSServer),
ServerPorts: []int{2049},
ServerVolumes: map[string]string{"": "/exports"},
ServerReadyMessage: "NFS started",
}
if len(args) > 0 {
config.ServerArgs = args
}
pod, ip = CreateStorageServer(cs, config)
return config, pod, ip
}
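// A hypothetical end-to-end sketch: start the NFS server, mount its export in
// a client pod, and assert the served file's content (the file name and
// "Hello from NFS!" content are made up for illustration).
func nfsVolumeSketch(cs clientset.Interface, namespace string) {
	config, _, serverIP := NewNFSServer(cs, namespace, nil)
	tests := []VolumeTest{{
		Volume: v1.VolumeSource{
			NFS: &v1.NFSVolumeSource{Server: serverIP, Path: "/", ReadOnly: true},
		},
		File:            "index.html",
		ExpectedContent: "Hello from NFS!",
	}}
	TestVolumeClient(cs, config, nil, "", tests)
}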
// GlusterFS-specific wrapper for CreateStorageServer. Also creates the gluster endpoints object.
func NewGlusterfsServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
config = VolumeTestConfig{
Namespace: namespace,
Prefix: "gluster",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeGlusterServer),
ServerPorts: []int{24007, 24008, 49152},
}
pod, ip = CreateStorageServer(cs, config)
By("creating Gluster endpoints")
endpoints := &v1.Endpoints{
TypeMeta: metav1.TypeMeta{
Kind: "Endpoints",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-server",
},
Subsets: []v1.EndpointSubset{
{
Addresses: []v1.EndpointAddress{
{
IP: ip,
},
},
Ports: []v1.EndpointPort{
{
Name: "gluster",
Port: 24007,
Protocol: v1.ProtocolTCP,
},
},
},
},
}
endpoints, err := cs.CoreV1().Endpoints(namespace).Create(endpoints)
ExpectNoError(err, "failed to create endpoints for Gluster server")
return config, pod, ip
}
// iSCSI-specific wrapper for CreateStorageServer.
func NewISCSIServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
config = VolumeTestConfig{
Namespace: namespace,
Prefix: "iscsi",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeISCSIServer),
ServerPorts: []int{3260},
ServerVolumes: map[string]string{
// iSCSI container needs to insert modules from the host
"/lib/modules": "/lib/modules",
},
ServerReadyMessage: "Configuration restored from /etc/target/saveconfig.json",
}
pod, ip = CreateStorageServer(cs, config)
return config, pod, ip
}
// CephRBD-specific wrapper for CreateStorageServer.
func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, secret *v1.Secret, ip string) {
config = VolumeTestConfig{
Namespace: namespace,
Prefix: "rbd",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeRBDServer),
ServerPorts: []int{6789},
ServerVolumes: map[string]string{
"/lib/modules": "/lib/modules",
},
ServerReadyMessage: "Ceph is ready",
}
pod, ip = CreateStorageServer(cs, config)
// create secrets for the server
secret = &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-secret",
},
Data: map[string][]byte{
// from test/images/volumes-tester/rbd/keyring
"key": []byte("AQDRrKNVbEevChAAEmRC+pW/KBVHxa0w/POILA=="),
},
Type: "kubernetes.io/rbd",
}
secret, err := cs.CoreV1().Secrets(config.Namespace).Create(secret)
if err != nil {
Failf("Failed to create secrets for Ceph RBD: %v", err)
}
return config, pod, secret, ip
}
// Wrapper for StartVolumeServer(). A storage server config is passed in, and a pod pointer
// and ip address string are returned.
// Note: Expect() is called so no error is returned.
func CreateStorageServer(cs clientset.Interface, config VolumeTestConfig) (pod *v1.Pod, ip string) {
pod = StartVolumeServer(cs, config)
Expect(pod).NotTo(BeNil(), "storage server pod should not be nil")
ip = pod.Status.PodIP
Expect(len(ip)).NotTo(BeZero(), fmt.Sprintf("pod %s's IP should not be empty", pod.Name))
Logf("%s server pod IP address: %s", config.Prefix, ip)
return pod, ip
}
// Starts a container specified by config.serverImage and exports all
// config.serverPorts from it. The returned pod should be used to get the server
// IP address and create appropriate VolumeSource.
func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.Pod {
podClient := client.CoreV1().Pods(config.Namespace)
portCount := len(config.ServerPorts)
serverPodPorts := make([]v1.ContainerPort, portCount)
for i := 0; i < portCount; i++ {
portName := fmt.Sprintf("%s-%d", config.Prefix, i)
serverPodPorts[i] = v1.ContainerPort{
Name: portName,
ContainerPort: int32(config.ServerPorts[i]),
Protocol: v1.ProtocolTCP,
}
}
volumeCount := len(config.ServerVolumes)
volumes := make([]v1.Volume, volumeCount)
mounts := make([]v1.VolumeMount, volumeCount)
i := 0
for src, dst := range config.ServerVolumes {
mountName := fmt.Sprintf("path%d", i)
volumes[i].Name = mountName
if src == "" {
volumes[i].VolumeSource.EmptyDir = &v1.EmptyDirVolumeSource{}
} else {
volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
Path: src,
}
}
mounts[i].Name = mountName
mounts[i].ReadOnly = false
mounts[i].MountPath = dst
i++
}
serverPodName := fmt.Sprintf("%s-server", config.Prefix)
By(fmt.Sprint("creating ", serverPodName, " pod"))
privileged := new(bool)
*privileged = true
restartPolicy := v1.RestartPolicyAlways
if config.WaitForCompletion {
restartPolicy = v1.RestartPolicyNever
}
serverPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: serverPodName,
Labels: map[string]string{
"role": serverPodName,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: serverPodName,
Image: config.ServerImage,
SecurityContext: &v1.SecurityContext{
Privileged: privileged,
},
Command: config.ServerCmds,
Args: config.ServerArgs,
Ports: serverPodPorts,
VolumeMounts: mounts,
},
},
Volumes: volumes,
RestartPolicy: restartPolicy,
NodeName: config.ServerNodeName,
NodeSelector: config.NodeSelector,
},
}
var pod *v1.Pod
serverPod, err := podClient.Create(serverPod)
// ok if the server pod already exists. TODO: make this controllable by callers
if err != nil {
if apierrs.IsAlreadyExists(err) {
Logf("Ignore \"already-exists\" error, re-get pod...")
By(fmt.Sprintf("re-getting the %q server pod", serverPodName))
serverPod, err = podClient.Get(serverPodName, metav1.GetOptions{})
ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err)
pod = serverPod
} else {
ExpectNoError(err, "Failed to create %q pod: %v", serverPodName, err)
}
}
if config.WaitForCompletion {
ExpectNoError(WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace))
ExpectNoError(podClient.Delete(serverPod.Name, nil))
} else {
ExpectNoError(WaitForPodRunningInNamespace(client, serverPod))
if pod == nil {
By(fmt.Sprintf("locating the %q server pod", serverPodName))
pod, err = podClient.Get(serverPodName, metav1.GetOptions{})
ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
}
}
if config.ServerReadyMessage != "" {
_, err := LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
ExpectNoError(err, "Failed to find %q in pod logs: %s", config.ServerReadyMessage, err)
}
return pod
}
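// A hypothetical sketch of calling StartVolumeServer directly with a minimal
// config: one exported TCP port and one tmpfs emptyDir mounted at /exports
// (the "demo" prefix is arbitrary).
func startServerSketch(cs clientset.Interface, namespace string) *v1.Pod {
	return StartVolumeServer(cs, VolumeTestConfig{
		Namespace:     namespace,
		Prefix:        "demo",
		ServerImage:   imageutils.GetE2EImage(imageutils.VolumeNFSServer),
		ServerPorts:   []int{2049},
		ServerVolumes: map[string]string{"": "/exports"},
	})
}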
// CleanUpVolumeServer is a cleanup wrapper for a volume server created by a CreateStorageServer function that did not create a secret.
func CleanUpVolumeServer(f *Framework, serverPod *v1.Pod) {
CleanUpVolumeServerWithSecret(f, serverPod, nil)
}
// CleanUpVolumeServerWithSecret is a cleanup wrapper for a volume server and the secret created by a CreateStorageServer function.
func CleanUpVolumeServerWithSecret(f *Framework, serverPod *v1.Pod, secret *v1.Secret) {
cs := f.ClientSet
ns := f.Namespace
if secret != nil {
Logf("Deleting server secret %q...", secret.Name)
err := cs.CoreV1().Secrets(ns.Name).Delete(secret.Name, &metav1.DeleteOptions{})
if err != nil {
Logf("Delete secret failed: %v", err)
}
}
Logf("Deleting server pod %q...", serverPod.Name)
err := DeletePodWithWait(f, cs, serverPod)
if err != nil {
Logf("Server pod delete failed: %v", err)
}
}
// Clean both server and client pods.
func VolumeTestCleanup(f *Framework, config VolumeTestConfig) {
By(fmt.Sprint("cleaning the environment after ", config.Prefix))
defer GinkgoRecover()
cs := f.ClientSet
err := DeletePodWithWaitByName(f, cs, config.Prefix+"-client", config.Namespace)
Expect(err).To(BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-client", config.Namespace)
if config.ServerImage != "" {
err := DeletePodWithWaitByName(f, cs, config.Prefix+"-server", config.Namespace)
Expect(err).To(BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace)
}
}
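// A hypothetical teardown sketch: deferring the cleanup right after the server
// starts removes both pods even when a later assertion fails, e.g.
//
//	config, _, _ := NewNFSServer(f.ClientSet, f.Namespace.Name, nil)
//	defer VolumeTestCleanup(f, config)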
// TestVolumeClient starts a client pod using the given VolumeSource (exported by StartVolumeServer())
// and check that the pod sees expected data, e.g. from the server pod.
// Multiple VolumeTests can be specified to mount multiple volumes to a single
// pod.
func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGroup *int64, fsType string, tests []VolumeTest) {
By(fmt.Sprint("starting ", config.Prefix, "-client"))
var gracePeriod int64 = 1
var command string
if !NodeOSDistroIs("windows") {
command = "while true ; do cat /opt/0/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done "
} else {
command = "while(1) {cat /opt/0/index.html ; sleep 2 ; ls /opt/; sleep 2}"
}
seLinuxOptions := &v1.SELinuxOptions{Level: "s0:c0,c1"}
clientPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-client",
Labels: map[string]string{
"role": config.Prefix + "-client",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: config.Prefix + "-client",
Image: GetTestImage(BusyBoxImage),
WorkingDir: "/opt",
// An imperative and easily debuggable container which reads vol contents for
// us to scan in the tests or by eye.
// We expect that /opt is empty in the minimal containers which we use in this test.
Command: GenerateScriptCmd(command),
VolumeMounts: []v1.VolumeMount{},
},
},
TerminationGracePeriodSeconds: &gracePeriod,
SecurityContext: GeneratePodSecurityContext(fsGroup, seLinuxOptions),
Volumes: []v1.Volume{},
NodeName: config.ClientNodeName,
NodeSelector: config.NodeSelector,
},
}
podsNamespacer := client.CoreV1().Pods(config.Namespace)
for i, test := range tests {
volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)
clientPod.Spec.Containers[0].VolumeMounts = append(clientPod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
Name: volumeName,
MountPath: fmt.Sprintf("/opt/%d", i),
})
clientPod.Spec.Volumes = append(clientPod.Spec.Volumes, v1.Volume{
Name: volumeName,
VolumeSource: test.Volume,
})
}
clientPod, err := podsNamespacer.Create(clientPod)
if err != nil {
Failf("Failed to create %s pod: %v", clientPod.Name, err)
}
ExpectNoError(WaitForPodRunningInNamespace(client, clientPod))
By("Checking that text file contents are perfect.")
for i, test := range tests {
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
commands := GenerateReadFileCmd(fileName)
_, err = LookForStringInPodExec(config.Namespace, clientPod.Name, commands, test.ExpectedContent, time.Minute)
ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)
}
if !NodeOSDistroIs("windows") {
if fsGroup != nil {
By("Checking fsGroup is correct.")
_, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"ls", "-ld", "/opt/0"}, strconv.Itoa(int(*fsGroup)), time.Minute)
ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
}
if fsType != "" {
By("Checking fsType is correct.")
_, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"grep", " /opt/0 ", "/proc/mounts"}, fsType, time.Minute)
ExpectNoError(err, "failed: getting the right fsType %s", fsType)
}
}
}
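// A hypothetical variant sketch: the same client check, but also asserting
// fsGroup ownership and an ext4 fsType on the first mounted volume (the group
// ID and fsType values are made up).
func fsGroupVolumeSketch(cs clientset.Interface, config VolumeTestConfig, tests []VolumeTest) {
	fsGroup := int64(1234)
	TestVolumeClient(cs, config, &fsGroup, "ext4", tests)
}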
// InjectHtml inserts an index.html file with the given content into the given volume. It does so by
// starting an auxiliary pod which writes the file there.
// The volume must be writable.
func InjectHtml(client clientset.Interface, config VolumeTestConfig, fsGroup *int64, volume v1.VolumeSource, content string) {
By(fmt.Sprint("starting ", config.Prefix, " injector"))
podClient := client.CoreV1().Pods(config.Namespace)
podName := fmt.Sprintf("%s-injector-%s", config.Prefix, rand.String(4))
volMountName := fmt.Sprintf("%s-volume-%s", config.Prefix, rand.String(4))
fileName := "/mnt/index.html"
injectPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{
"role": config.Prefix + "-injector",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: config.Prefix + "-injector",
Image: GetTestImage(BusyBoxImage),
Command: GenerateWriteFileCmd(content, fileName),
VolumeMounts: []v1.VolumeMount{
{
Name: volMountName,
MountPath: "/mnt",
},
},
SecurityContext: GenerateSecurityContext(true),
},
},
SecurityContext: &v1.PodSecurityContext{
FSGroup: fsGroup,
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: volMountName,
VolumeSource: volume,
},
},
NodeName: config.ClientNodeName,
NodeSelector: config.NodeSelector,
},
}
defer func() {
podClient.Delete(podName, nil)
}()
injectPod, err := podClient.Create(injectPod)
ExpectNoError(err, "Failed to create injector pod: %v", err)
err = WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace)
ExpectNoError(err)
}
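// A hypothetical usage sketch: pre-populate the served file before a client
// pod mounts the same volume (the volume and content come from the test's
// setup).
func injectSketch(cs clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource) {
	InjectHtml(cs, config, nil, volume, "Hello from NFS!")
}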
func CreateGCEVolume() (*v1.PersistentVolumeSource, string) {
diskName, err := CreatePDWithRetry()
ExpectNoError(err)
return &v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: diskName,
FSType: "ext3",
ReadOnly: false,
},
}, diskName
}
// GenerateScriptCmd generates the corresponding command lines to execute a command.
// Depending on whether the node OS is Windows or Linux, the command will use powershell or /bin/sh.
func GenerateScriptCmd(command string) []string {
var commands []string
if !NodeOSDistroIs("windows") {
commands = []string{"/bin/sh", "-c", command}
} else {
commands = []string{"powershell", "/c", command}
}
return commands
}
// GenerateWriteFileCmd generates the corresponding command lines to write a file with the given content and file path.
// Depending on whether the node OS is Windows or Linux, the command will use powershell or /bin/sh.
func GenerateWriteFileCmd(content, fullPath string) []string {
var commands []string
if !NodeOSDistroIs("windows") {
commands = []string{"/bin/sh", "-c", "echo '" + content + "' > " + fullPath}
} else {
commands = []string{"powershell", "/c", "echo '" + content + "' > " + fullPath}
}
return commands
}
// GenerateReadFileCmd generates the corresponding command lines to read from a file with the given file path.
// Depending on whether the node OS is Windows or Linux, the command will use powershell or /bin/sh.
func GenerateReadFileCmd(fullPath string) []string {
var commands []string
if !NodeOSDistroIs("windows") {
commands = []string{"cat", fullPath}
} else {
commands = []string{"powershell", "/c", "type " + fullPath}
}
return commands
}
// GenerateWriteandExecuteScriptFileCmd generates the corresponding command lines to write a file with the given file path
// and also execute it.
// Depending on whether the node OS is Windows or Linux, the command will use powershell or /bin/sh.
func GenerateWriteandExecuteScriptFileCmd(content, fileName, filePath string) []string {
// for windows cluster, modify the Pod spec.
if NodeOSDistroIs("windows") {
scriptName := fmt.Sprintf("%s.ps1", fileName)
fullPath := filepath.Join(filePath, scriptName)
cmd := "echo \"" + content + "\" > " + fullPath + "; .\\" + fullPath
Logf("generated pod command %s", cmd)
return []string{"powershell", "/c", cmd}
} else {
scriptName := fmt.Sprintf("%s.sh", fileName)
fullPath := filepath.Join(filePath, scriptName)
cmd := fmt.Sprintf("echo \"%s\" > %s; chmod u+x %s; %s;", content, fullPath, fullPath, fullPath)
return []string{"/bin/sh", "-ec", cmd}
}
}
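// Illustrative expansion under the Linux branch above:
// GenerateWriteandExecuteScriptFileCmd("echo ok", "probe", "/opt") returns
//
//	[]string{"/bin/sh", "-ec", "echo \"echo ok\" > /opt/probe.sh; chmod u+x /opt/probe.sh; /opt/probe.sh;"}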
// GenerateSecurityContext generates the corresponding container security context with the given inputs
// If the node OS is Windows, the inputs are currently ignored and nil is returned.
// TODO: Will modify it after windows has its own security context
func GenerateSecurityContext(privileged bool) *v1.SecurityContext {
if NodeOSDistroIs("windows") {
return nil
}
return &v1.SecurityContext{
Privileged: &privileged,
}
}
// GeneratePodSecurityContext generates the corresponding pod security context with the given inputs
// If the node OS is Windows, the inputs are currently ignored and nil is returned.
// TODO: Will modify it after windows has its own security context
func GeneratePodSecurityContext(fsGroup *int64, seLinuxOptions *v1.SELinuxOptions) *v1.PodSecurityContext {
if NodeOSDistroIs("windows") {
return nil
}
return &v1.PodSecurityContext{
SELinuxOptions: seLinuxOptions,
FSGroup: fsGroup,
}
}
// GetTestImage returns the image name for the given input.
// If the node OS is Windows, we currently return the Nettest image for Windows nodes
// due to https://github.com/kubernetes-sigs/windows-testing/pull/35.
func GetTestImage(image string) string {
if NodeOSDistroIs("windows") {
return imageutils.GetE2EImage(imageutils.Nettest)
}
return image
}