Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 02:33:34 +00:00)

Commit: vendor files

vendor/k8s.io/kubernetes/test/e2e/framework/BUILD (generated, vendored, new file, 167 lines)
@@ -0,0 +1,167 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "authorizer_util.go",
        "cleanup.go",
        "deployment_util.go",
        "exec_util.go",
        "firewall_util.go",
        "framework.go",
        "get-kubemark-resource-usage.go",
        "google_compute.go",
        "gpu_util.go",
        "ingress_utils.go",
        "jobs_util.go",
        "kubelet_stats.go",
        "log_size_monitoring.go",
        "metrics_util.go",
        "networking_utils.go",
        "nodes_util.go",
        "perf_util.go",
        "pods.go",
        "psp_util.go",
        "pv_util.go",
        "rc_util.go",
        "resource_usage_gatherer.go",
        "rs_util.go",
        "service_util.go",
        "size.go",
        "statefulset_utils.go",
        "test_context.go",
        "upgrade_util.go",
        "util.go",
        "volume_util.go",
    ],
    importpath = "k8s.io/kubernetes/test/e2e/framework",
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/api/testapi:go_default_library",
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/apis/apps:go_default_library",
        "//pkg/apis/batch:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/apis/extensions:go_default_library",
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/client/conditions:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/aws:go_default_library",
        "//pkg/cloudprovider/providers/azure:go_default_library",
        "//pkg/cloudprovider/providers/gce:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/controller/deployment/util:go_default_library",
        "//pkg/controller/node:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubectl:go_default_library",
        "//pkg/kubelet/apis/kubeletconfig:go_default_library",
        "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
        "//pkg/kubelet/dockershim/metrics:go_default_library",
        "//pkg/kubelet/events:go_default_library",
        "//pkg/kubelet/metrics:go_default_library",
        "//pkg/kubelet/sysctl:go_default_library",
        "//pkg/kubelet/util/format:go_default_library",
        "//pkg/kubemark:go_default_library",
        "//pkg/master/ports:go_default_library",
        "//pkg/security/podsecuritypolicy/seccomp:go_default_library",
        "//pkg/ssh:go_default_library",
        "//pkg/util/file:go_default_library",
        "//pkg/util/system:go_default_library",
        "//pkg/util/taints:go_default_library",
        "//pkg/util/version:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
        "//plugin/pkg/scheduler/schedulercache:go_default_library",
        "//test/e2e/framework/ginkgowrapper:go_default_library",
        "//test/e2e/framework/metrics:go_default_library",
        "//test/e2e/manifest:go_default_library",
        "//test/e2e/perftype:go_default_library",
        "//test/utils:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
        "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library",
        "//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library",
        "//vendor/github.com/aws/aws-sdk-go/service/autoscaling:go_default_library",
        "//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/ginkgo/config:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
        "//vendor/github.com/onsi/gomega/types:go_default_library",
        "//vendor/github.com/prometheus/common/expfmt:go_default_library",
        "//vendor/github.com/prometheus/common/model:go_default_library",
        "//vendor/github.com/spf13/viper:go_default_library",
        "//vendor/golang.org/x/crypto/ssh:go_default_library",
        "//vendor/golang.org/x/net/websocket:go_default_library",
        "//vendor/google.golang.org/api/compute/v1:go_default_library",
        "//vendor/google.golang.org/api/googleapi:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta2:go_default_library",
        "//vendor/k8s.io/api/authorization/v1beta1:go_default_library",
        "//vendor/k8s.io/api/batch/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/discovery:go_default_library",
        "//vendor/k8s.io/client-go/dynamic:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
        "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library",
        "//vendor/k8s.io/client-go/util/retry:go_default_library",
        "//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//test/e2e/framework/ginkgowrapper:all-srcs",
        "//test/e2e/framework/metrics:all-srcs",
        "//test/e2e/framework/timer:all-srcs",
    ],
    tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/test/e2e/framework/authorizer_util.go (generated, vendored, new file, 151 lines)
@@ -0,0 +1,151 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"fmt"
	"sync"
	"time"

	authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	v1beta1authorization "k8s.io/client-go/kubernetes/typed/authorization/v1beta1"
	v1beta1rbac "k8s.io/client-go/kubernetes/typed/rbac/v1beta1"
)

const (
	policyCachePollInterval = 100 * time.Millisecond
	policyCachePollTimeout  = 5 * time.Second
)

// WaitForAuthorizationUpdate checks if the given user can perform the named verb and action.
// If policyCachePollTimeout is reached without the expected condition matching, an error is returned.
func WaitForAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviewsGetter, user, namespace, verb string, resource schema.GroupResource, allowed bool) error {
	return WaitForNamedAuthorizationUpdate(c, user, namespace, verb, "", resource, allowed)
}

// WaitForNamedAuthorizationUpdate checks if the given user can perform the named verb and action on the named resource.
// If policyCachePollTimeout is reached without the expected condition matching, an error is returned.
func WaitForNamedAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviewsGetter, user, namespace, verb, resourceName string, resource schema.GroupResource, allowed bool) error {
	review := &authorizationv1beta1.SubjectAccessReview{
		Spec: authorizationv1beta1.SubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1beta1.ResourceAttributes{
				Group:     resource.Group,
				Verb:      verb,
				Resource:  resource.Resource,
				Namespace: namespace,
				Name:      resourceName,
			},
			User: user,
		},
	}
	err := wait.Poll(policyCachePollInterval, policyCachePollTimeout, func() (bool, error) {
		response, err := c.SubjectAccessReviews().Create(review)
		// GKE doesn't enable the SAR endpoint. Without this endpoint, we cannot determine if the policy engine
		// has adjusted as expected. In this case, simply wait one second and hope it's up to date.
		if apierrors.IsNotFound(err) {
			fmt.Printf("SubjectAccessReview endpoint is missing\n")
			time.Sleep(1 * time.Second)
			return true, nil
		}
		if err != nil {
			return false, err
		}
		if response.Status.Allowed != allowed {
			return false, nil
		}
		return true, nil
	})
	return err
}

// BindClusterRole binds the cluster role at the cluster scope.
func BindClusterRole(c v1beta1rbac.ClusterRoleBindingsGetter, clusterRole, ns string, subjects ...rbacv1beta1.Subject) {
	// Since the namespace names are unique, we can leave this lying around so we don't have to race any caches
	_, err := c.ClusterRoleBindings().Create(&rbacv1beta1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: ns + "--" + clusterRole,
		},
		RoleRef: rbacv1beta1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     clusterRole,
		},
		Subjects: subjects,
	})

	// If we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
	if err != nil {
		fmt.Printf("Error binding clusterrole/%s for %q for %v\n", clusterRole, ns, subjects)
	}
}

// BindClusterRoleInNamespace binds the cluster role at the namespace scope.
func BindClusterRoleInNamespace(c v1beta1rbac.RoleBindingsGetter, clusterRole, ns string, subjects ...rbacv1beta1.Subject) {
	bindInNamespace(c, "ClusterRole", clusterRole, ns, subjects...)
}

// BindRoleInNamespace binds the role at the namespace scope.
func BindRoleInNamespace(c v1beta1rbac.RoleBindingsGetter, role, ns string, subjects ...rbacv1beta1.Subject) {
	bindInNamespace(c, "Role", role, ns, subjects...)
}

func bindInNamespace(c v1beta1rbac.RoleBindingsGetter, roleType, role, ns string, subjects ...rbacv1beta1.Subject) {
	// Since the namespace names are unique, we can leave this lying around so we don't have to race any caches
	_, err := c.RoleBindings(ns).Create(&rbacv1beta1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: ns + "--" + role,
		},
		RoleRef: rbacv1beta1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     roleType,
			Name:     role,
		},
		Subjects: subjects,
	})

	// If we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
	if err != nil {
		fmt.Printf("Error binding %s/%s into %q for %v\n", roleType, role, ns, subjects)
	}
}

var (
	isRBACEnabledOnce sync.Once
	isRBACEnabled     bool
)

// IsRBACEnabled reports whether RBAC appears to be enabled on the cluster, caching the answer after the first call.
func IsRBACEnabled(f *Framework) bool {
	isRBACEnabledOnce.Do(func() {
		crs, err := f.ClientSet.RbacV1().ClusterRoles().List(metav1.ListOptions{})
		if err != nil {
			Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err)
			isRBACEnabled = false
		} else if crs == nil || len(crs.Items) == 0 {
			Logf("No ClusterRoles found; assuming RBAC is disabled.")
			isRBACEnabled = false
		} else {
			Logf("Found ClusterRoles; assuming RBAC is enabled.")
			isRBACEnabled = true
		}
	})
	return isRBACEnabled
}
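
Editor's note (not part of the vendored file): a minimal sketch of how an e2e test might use these helpers, assuming an existing *framework.Framework; the "edit" role, the service-account user string, and the package name are illustrative placeholders, not anything defined by this commit.

// Sketch only: bind a ClusterRole to a namespace's default service account,
// then wait until the authorizer observes the change.
package e2eexample

import (
	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/kubernetes/test/e2e/framework"
)

func grantAndWait(f *framework.Framework) error {
	ns := f.Namespace.Name
	framework.BindClusterRole(f.ClientSet.RbacV1beta1(), "edit", ns, rbacv1beta1.Subject{
		Kind:      rbacv1beta1.ServiceAccountKind,
		Namespace: ns,
		Name:      "default",
	})
	return framework.WaitForNamedAuthorizationUpdate(
		f.ClientSet.AuthorizationV1beta1(),
		"system:serviceaccount:"+ns+":default",
		ns, "list", "", schema.GroupResource{Resource: "pods"}, true)
}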

vendor/k8s.io/kubernetes/test/e2e/framework/cleanup.go (generated, vendored, new file, 61 lines)
@@ -0,0 +1,61 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import "sync"

type CleanupActionHandle *int

var cleanupActionsLock sync.Mutex
var cleanupActions = map[CleanupActionHandle]func(){}

// AddCleanupAction installs a function that will be called in the event of the
// whole test being terminated. This allows arbitrary pieces of the overall
// test to hook into SynchronizedAfterSuite().
func AddCleanupAction(fn func()) CleanupActionHandle {
	p := CleanupActionHandle(new(int))
	cleanupActionsLock.Lock()
	defer cleanupActionsLock.Unlock()
	cleanupActions[p] = fn
	return p
}

// RemoveCleanupAction removes a function that was installed by
// AddCleanupAction.
func RemoveCleanupAction(p CleanupActionHandle) {
	cleanupActionsLock.Lock()
	defer cleanupActionsLock.Unlock()
	delete(cleanupActions, p)
}

// RunCleanupActions runs all functions installed by AddCleanupAction. It does
// not remove them (see RemoveCleanupAction) but it does run unlocked, so they
// may remove themselves.
func RunCleanupActions() {
	list := []func(){}
	func() {
		cleanupActionsLock.Lock()
		defer cleanupActionsLock.Unlock()
		for _, fn := range cleanupActions {
			list = append(list, fn)
		}
	}()
	// Run unlocked.
	for _, fn := range list {
		fn()
	}
}
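
Editor's note (not part of the vendored file): a minimal sketch of the cleanup-handle pattern; the function and package names are illustrative placeholders.

// Sketch only: register a cleanup action so it also runs if the suite is
// torn down early, and remove it again on normal completion.
package e2eexample

import "k8s.io/kubernetes/test/e2e/framework"

func withCleanup(run func()) {
	handle := framework.AddCleanupAction(func() {
		// Release any external resources created by the test here.
	})
	defer framework.RemoveCleanupAction(handle)
	run()
}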

vendor/k8s.io/kubernetes/test/e2e/framework/deployment_util.go (generated, vendored, new file, 297 lines)
@@ -0,0 +1,297 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"fmt"
	"time"

	. "github.com/onsi/ginkgo"

	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	clientset "k8s.io/client-go/kubernetes"
	extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
	testutils "k8s.io/kubernetes/test/utils"
)

func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*extensions.Deployment, error) {
	return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)
}

// WaitForDeploymentOldRSsNum waits for the deployment to clean up old replica sets.
func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
	var oldRSs []*extensions.ReplicaSet
	var d *extensions.Deployment

	pollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
		deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		d = deployment

		_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.ExtensionsV1beta1())
		if err != nil {
			return false, err
		}
		return len(oldRSs) == desiredRSNum, nil
	})
	if pollErr == wait.ErrWaitTimeout {
		pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
		logReplicaSetsOfDeployment(d, oldRSs, nil)
	}
	return pollErr
}

func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {
	testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, Logf)
}

func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
	return testutils.WaitForObservedDeployment(c, ns, deploymentName, desiredGeneration)
}

func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType extensions.DeploymentConditionType) error {
	return testutils.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, Logf, Poll, pollLongTimeout)
}

// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that deployment revision and its new RS revision should be updated shortly most of the time, but an overwhelmed RS controller
// may result in taking longer to relabel a RS.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
	return testutils.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, Logf, Poll, pollLongTimeout)
}

func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType extensions.DeploymentStrategyType) *extensions.Deployment {
	zero := int64(0)
	return &extensions.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name: deploymentName,
		},
		Spec: extensions.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{MatchLabels: podLabels},
			Strategy: extensions.DeploymentStrategy{
				Type: strategyType,
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: podLabels,
				},
				Spec: v1.PodSpec{
					TerminationGracePeriodSeconds: &zero,
					Containers: []v1.Container{
						{
							Name:  imageName,
							Image: image,
						},
					},
				},
			},
		},
	}
}

// WaitForDeploymentComplete waits for the deployment to complete, without checking whether the rolling update strategy is broken.
// Rolling update strategy is used only during a rolling update, and can be violated in other situations,
// such as shortly after a scaling event or when the deployment has just been created.
func WaitForDeploymentComplete(c clientset.Interface, d *extensions.Deployment) error {
	return testutils.WaitForDeploymentComplete(c, d, Logf, Poll, pollLongTimeout)
}

// WaitForDeploymentCompleteAndCheckRolling waits for the deployment to complete, and checks that the rolling update strategy is not broken at any time.
// Rolling update strategy should not be broken during a rolling update.
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *extensions.Deployment) error {
	return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, Logf, Poll, pollLongTimeout)
}

// WaitForDeploymentUpdatedReplicasLTE waits for the given deployment to be observed by the controller and to have at least minUpdatedReplicas updated replicas.
func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
	return testutils.WaitForDeploymentUpdatedReplicasLTE(c, ns, deploymentName, minUpdatedReplicas, desiredGeneration, Poll, pollLongTimeout)
}

// WaitForDeploymentRollbackCleared waits until the given deployment has either started rolling back or no longer needs to roll back.
// Note that rollback should be cleared shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error {
	return testutils.WaitForDeploymentRollbackCleared(c, ns, deploymentName, Poll, pollShortTimeout)
}

// WatchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time with
// old pods.
func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) error {
	if d.Spec.Strategy.Type != extensions.RecreateDeploymentStrategyType {
		return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
	}

	w, err := c.ExtensionsV1beta1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
	if err != nil {
		return err
	}

	status := d.Status

	condition := func(event watch.Event) (bool, error) {
		d := event.Object.(*extensions.Deployment)
		status = d.Status

		if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
			_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.ExtensionsV1beta1())
			newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.ExtensionsV1beta1())
			if err == nil && nerr == nil {
				Logf("%+v", d)
				logReplicaSetsOfDeployment(d, allOldRSs, newRS)
				logPodsOfDeployment(c, d, append(allOldRSs, newRS))
			}
			return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
		}

		return *(d.Spec.Replicas) == d.Status.Replicas &&
			*(d.Spec.Replicas) == d.Status.UpdatedReplicas &&
			d.Generation <= d.Status.ObservedGeneration, nil
	}

	_, err = watch.Until(2*time.Minute, w, condition)
	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
	}
	return err
}

func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
	return ScaleResource(clientset, internalClientset, ns, name, size, wait, extensionsinternal.Kind("Deployment"))
}

func RunDeployment(config testutils.DeploymentConfig) error {
	By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace))
	config.NodeDumpFunc = DumpNodeDebugInfo
	config.ContainerDumpFunc = LogFailedContainers
	return testutils.RunDeployment(config)
}

func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) {
	testutils.LogPodsOfDeployment(c, deployment, rsList, Logf)
}

func WaitForDeploymentRevision(c clientset.Interface, d *extensions.Deployment, targetRevision string) error {
	err := wait.PollImmediate(Poll, pollLongTimeout, func() (bool, error) {
		deployment, err := c.ExtensionsV1beta1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		revision := deployment.Annotations[deploymentutil.RevisionAnnotation]
		return revision == targetRevision, nil
	})
	if err != nil {
		return fmt.Errorf("error waiting for revision to become %q for deployment %q: %v", targetRevision, d.Name, err)
	}
	return nil
}

// CheckDeploymentRevisionAndImage checks if the input deployment's and its new replica set's revision and image are as expected.
func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName, revision, image string) error {
	return testutils.CheckDeploymentRevisionAndImage(c, ns, deploymentName, revision, image)
}

func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*extensions.Deployment, error) {
	deploymentSpec := MakeDeployment(replicas, podLabels, namespace, pvclaims, false, command)
	deployment, err := client.Extensions().Deployments(namespace).Create(deploymentSpec)
	if err != nil {
		return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err)
	}
	Logf("Waiting deployment %q to complete", deploymentSpec.Name)
	err = WaitForDeploymentComplete(client, deployment)
	if err != nil {
		return nil, fmt.Errorf("deployment %q failed to complete: %v", deploymentSpec.Name, err)
	}
	return deployment, nil
}

// MakeDeployment creates a deployment definition based on the namespace. The deployment references the PVCs'
// names. A slice of BASH commands can be supplied as args to be run by the pod.
func MakeDeployment(replicas int32, podLabels map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *extensions.Deployment {
	if len(command) == 0 {
		command = "while true; do sleep 1; done"
	}
	zero := int64(0)
	deploymentName := "deployment-" + string(uuid.NewUUID())
	deploymentSpec := &extensions.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      deploymentName,
			Namespace: namespace,
		},
		Spec: extensions.DeploymentSpec{
			Replicas: &replicas,
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: podLabels,
				},
				Spec: v1.PodSpec{
					TerminationGracePeriodSeconds: &zero,
					Containers: []v1.Container{
						{
							Name:    "write-pod",
							Image:   "busybox",
							Command: []string{"/bin/sh"},
							Args:    []string{"-c", command},
							SecurityContext: &v1.SecurityContext{
								Privileged: &isPrivileged,
							},
						},
					},
					RestartPolicy: v1.RestartPolicyAlways,
				},
			},
		},
	}
	var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
	var volumes = make([]v1.Volume, len(pvclaims))
	for index, pvclaim := range pvclaims {
		volumename := fmt.Sprintf("volume%v", index+1)
		volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
		volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
	}
	deploymentSpec.Spec.Template.Spec.Containers[0].VolumeMounts = volumeMounts
	deploymentSpec.Spec.Template.Spec.Volumes = volumes
	return deploymentSpec
}

// GetPodsForDeployment gets pods for the given deployment
func GetPodsForDeployment(client clientset.Interface, deployment *extensions.Deployment) (*v1.PodList, error) {
	replicaSet, err := deploymentutil.GetNewReplicaSet(deployment, client.ExtensionsV1beta1())
	if err != nil {
		return nil, fmt.Errorf("Failed to get new replica set for deployment %q: %v", deployment.Name, err)
	}
	if replicaSet == nil {
		return nil, fmt.Errorf("expected a new replica set for deployment %q, found none", deployment.Name)
	}
	podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
		return client.Core().Pods(namespace).List(options)
	}
	rsList := []*extensions.ReplicaSet{replicaSet}
	podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
	if err != nil {
		return nil, fmt.Errorf("Failed to list Pods of Deployment %q: %v", deployment.Name, err)
	}
	return podList, nil
}
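
Editor's note (not part of the vendored file): a minimal sketch of creating a deployment with these helpers and waiting for it to finish rolling out; the image, labels, and namespace are illustrative placeholders.

// Sketch only: build a two-replica nginx deployment, create it, and wait
// for completion using the helpers above.
package e2eexample

import (
	extensions "k8s.io/api/extensions/v1beta1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

func rollOutNginx(c clientset.Interface, ns string) error {
	labels := map[string]string{"app": "nginx-example"}
	d := framework.NewDeployment("nginx-example", 2, labels, "nginx", "nginx:1.7.9",
		extensions.RollingUpdateDeploymentStrategyType)
	d, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
	if err != nil {
		return err
	}
	return framework.WaitForDeploymentComplete(c, d)
}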

vendor/k8s.io/kubernetes/test/e2e/framework/exec_util.go (generated, vendored, new file, 147 lines)
@@ -0,0 +1,147 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"bytes"
	"io"
	"net/url"
	"strings"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
	"k8s.io/kubernetes/pkg/api/legacyscheme"

	. "github.com/onsi/gomega"
)

// ExecOptions passed to ExecWithOptions
type ExecOptions struct {
	Command []string

	Namespace     string
	PodName       string
	ContainerName string

	Stdin         io.Reader
	CaptureStdout bool
	CaptureStderr bool
	// If false, whitespace in std{err,out} will be removed.
	PreserveWhitespace bool
}

// ExecWithOptions executes a command in the specified container,
// returning stdout, stderr and error. `options` allows
// additional parameters to be passed.
func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error) {
	Logf("ExecWithOptions %+v", options)

	config, err := LoadConfig()
	Expect(err).NotTo(HaveOccurred(), "failed to load restclient config")

	const tty = false

	req := f.ClientSet.CoreV1().RESTClient().Post().
		Resource("pods").
		Name(options.PodName).
		Namespace(options.Namespace).
		SubResource("exec").
		Param("container", options.ContainerName)
	req.VersionedParams(&v1.PodExecOptions{
		Container: options.ContainerName,
		Command:   options.Command,
		Stdin:     options.Stdin != nil,
		Stdout:    options.CaptureStdout,
		Stderr:    options.CaptureStderr,
		TTY:       tty,
	}, legacyscheme.ParameterCodec)

	var stdout, stderr bytes.Buffer
	err = execute("POST", req.URL(), config, options.Stdin, &stdout, &stderr, tty)

	if options.PreserveWhitespace {
		return stdout.String(), stderr.String(), err
	}
	return strings.TrimSpace(stdout.String()), strings.TrimSpace(stderr.String()), err
}

// ExecCommandInContainerWithFullOutput executes a command in the
// specified container and returns stdout, stderr and error.
func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName string, cmd ...string) (string, string, error) {
	return f.ExecWithOptions(ExecOptions{
		Command:       cmd,
		Namespace:     f.Namespace.Name,
		PodName:       podName,
		ContainerName: containerName,

		Stdin:              nil,
		CaptureStdout:      true,
		CaptureStderr:      true,
		PreserveWhitespace: false,
	})
}

// ExecCommandInContainer executes a command in the specified container.
func (f *Framework) ExecCommandInContainer(podName, containerName string, cmd ...string) string {
	stdout, stderr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
	Logf("Exec stderr: %q", stderr)
	Expect(err).NotTo(HaveOccurred(),
		"failed to execute command in pod %v, container %v: %v",
		podName, containerName, err)
	return stdout
}

func (f *Framework) ExecShellInContainer(podName, containerName string, cmd string) string {
	return f.ExecCommandInContainer(podName, containerName, "/bin/sh", "-c", cmd)
}

func (f *Framework) ExecCommandInPod(podName string, cmd ...string) string {
	pod, err := f.PodClient().Get(podName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred(), "failed to get pod")
	Expect(pod.Spec.Containers).NotTo(BeEmpty())
	return f.ExecCommandInContainer(podName, pod.Spec.Containers[0].Name, cmd...)
}

func (f *Framework) ExecCommandInPodWithFullOutput(podName string, cmd ...string) (string, string, error) {
	pod, err := f.PodClient().Get(podName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred(), "failed to get pod")
	Expect(pod.Spec.Containers).NotTo(BeEmpty())
	return f.ExecCommandInContainerWithFullOutput(podName, pod.Spec.Containers[0].Name, cmd...)
}

func (f *Framework) ExecShellInPod(podName string, cmd string) string {
	return f.ExecCommandInPod(podName, "/bin/sh", "-c", cmd)
}

func (f *Framework) ExecShellInPodWithFullOutput(podName string, cmd string) (string, string, error) {
	return f.ExecCommandInPodWithFullOutput(podName, "/bin/sh", "-c", cmd)
}

func execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error {
	exec, err := remotecommand.NewSPDYExecutor(config, method, url)
	if err != nil {
		return err
	}
	return exec.Stream(remotecommand.StreamOptions{
		Stdin:  stdin,
		Stdout: stdout,
		Stderr: stderr,
		Tty:    tty,
	})
}
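
Editor's note (not part of the vendored file): a minimal sketch of running a command inside a pod's container via ExecWithOptions; the pod and container names are illustrative placeholders.

// Sketch only: read /etc/hostname from a container and capture its output.
package e2eexample

import "k8s.io/kubernetes/test/e2e/framework"

func readHostname(f *framework.Framework) (string, string, error) {
	return f.ExecWithOptions(framework.ExecOptions{
		Command:       []string{"cat", "/etc/hostname"},
		Namespace:     f.Namespace.Name,
		PodName:       "example-pod",
		ContainerName: "example-container",
		CaptureStdout: true,
		CaptureStderr: true,
	})
}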

vendor/k8s.io/kubernetes/test/e2e/framework/firewall_util.go (generated, vendored, new file, 389 lines)
@@ -0,0 +1,389 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"fmt"
	"net/http"
	"strconv"
	"strings"
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/cloudprovider"
	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"

	. "github.com/onsi/gomega"
	compute "google.golang.org/api/compute/v1"
)

const (
	FirewallTimeoutDefault = 3 * time.Minute
	FirewallTestTcpTimeout = time.Duration(1 * time.Second)
	// Set ports outside of 30000-32767, 80 and 8080 to avoid being whitelisted by the e2e cluster
	FirewallTestHttpPort = int32(29999)
	FirewallTestUdpPort  = int32(29998)
)

// MakeFirewallNameForLBService returns the expected firewall name for a LB service.
// This should match the formatting of makeFirewallName() in pkg/cloudprovider/providers/gce/gce_loadbalancer.go
func MakeFirewallNameForLBService(name string) string {
	return fmt.Sprintf("k8s-fw-%s", name)
}

// ConstructFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
func ConstructFirewallForLBService(svc *v1.Service, nodeTag string) *compute.Firewall {
	if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
		Failf("can not construct firewall rule for non-loadbalancer type service")
	}
	fw := compute.Firewall{}
	fw.Name = MakeFirewallNameForLBService(cloudprovider.GetLoadBalancerName(svc))
	fw.TargetTags = []string{nodeTag}
	if svc.Spec.LoadBalancerSourceRanges == nil {
		fw.SourceRanges = []string{"0.0.0.0/0"}
	} else {
		fw.SourceRanges = svc.Spec.LoadBalancerSourceRanges
	}
	for _, sp := range svc.Spec.Ports {
		fw.Allowed = append(fw.Allowed, &compute.FirewallAllowed{
			IPProtocol: strings.ToLower(string(sp.Protocol)),
			Ports:      []string{strconv.Itoa(int(sp.Port))},
		})
	}
	return &fw
}

func MakeHealthCheckFirewallNameForLBService(clusterID, name string, isNodesHealthCheck bool) string {
	return gcecloud.MakeHealthCheckFirewallName(clusterID, name, isNodesHealthCheck)
}

// ConstructHealthCheckFirewallForLBService returns the expected GCE health-check firewall rule for a loadbalancer type service
func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service, nodeTag string, isNodesHealthCheck bool) *compute.Firewall {
	if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
		Failf("can not construct firewall rule for non-loadbalancer type service")
	}
	fw := compute.Firewall{}
	fw.Name = MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.GetLoadBalancerName(svc), isNodesHealthCheck)
	fw.TargetTags = []string{nodeTag}
	fw.SourceRanges = gcecloud.LoadBalancerSrcRanges()
	healthCheckPort := gcecloud.GetNodesHealthCheckPort()
	if !isNodesHealthCheck {
		healthCheckPort = svc.Spec.HealthCheckNodePort
	}
	fw.Allowed = []*compute.FirewallAllowed{
		{
			IPProtocol: "tcp",
			Ports:      []string{fmt.Sprintf("%d", healthCheckPort)},
		},
	}
	return &fw
}

// GetInstanceTags gets tags from GCE instance with given name.
func GetInstanceTags(cloudConfig CloudConfig, instanceName string) *compute.Tags {
	gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud)
	res, err := gceCloud.GetComputeService().Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone,
		instanceName).Do()
	if err != nil {
		Failf("Failed to get instance tags for %v: %v", instanceName, err)
	}
	return res.Tags
}

// SetInstanceTags sets tags on GCE instance with given name.
func SetInstanceTags(cloudConfig CloudConfig, instanceName, zone string, tags []string) []string {
	gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud)
	// Re-get the instance every time because we need the latest fingerprint for updating metadata
	resTags := GetInstanceTags(cloudConfig, instanceName)
	_, err := gceCloud.GetComputeService().Instances.SetTags(
		cloudConfig.ProjectID, zone, instanceName,
		&compute.Tags{Fingerprint: resTags.Fingerprint, Items: tags}).Do()
	if err != nil {
		Failf("failed to set instance tags: %v", err)
	}
	Logf("Sent request to set tags %v on instance: %v", tags, instanceName)
	return resTags.Items
}

// GetNodeTags gets the k8s node tags from one of the nodes
func GetNodeTags(c clientset.Interface, cloudConfig CloudConfig) []string {
	nodes := GetReadySchedulableNodesOrDie(c)
	if len(nodes.Items) == 0 {
		Logf("GetNodeTags: Found 0 node.")
		return []string{}
	}
	return GetInstanceTags(cloudConfig, nodes.Items[0].Name).Items
}

// GetInstancePrefix returns the INSTANCE_PREFIX env we set for e2e cluster.
// From cluster/gce/config-test.sh, master name is set up using below format:
// MASTER_NAME="${INSTANCE_PREFIX}-master"
func GetInstancePrefix(masterName string) (string, error) {
	if !strings.HasSuffix(masterName, "-master") {
		return "", fmt.Errorf("unexpected master name format: %v", masterName)
	}
	return masterName[:len(masterName)-7], nil
}

// GetClusterName returns the CLUSTER_NAME env we set for e2e cluster.
// From cluster/gce/config-test.sh, cluster name is set up using below format:
// CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
func GetClusterName(instancePrefix string) string {
	return instancePrefix
}

// GetE2eFirewalls returns all firewall rules we create for an e2e cluster.
// From cluster/gce/util.sh, all firewall rules should be consistent with the ones created by startup scripts.
func GetE2eFirewalls(masterName, masterTag, nodeTag, network, clusterIpRange string) []*compute.Firewall {
	instancePrefix, err := GetInstancePrefix(masterName)
	Expect(err).NotTo(HaveOccurred())
	clusterName := GetClusterName(instancePrefix)

	fws := []*compute.Firewall{}
	fws = append(fws, &compute.Firewall{
		Name:         clusterName + "-default-internal-master",
		SourceRanges: []string{"10.0.0.0/8"},
		TargetTags:   []string{masterTag},
		Allowed: []*compute.FirewallAllowed{
			{
				IPProtocol: "tcp",
				Ports:      []string{"1-2379"},
			},
			{
				IPProtocol: "tcp",
				Ports:      []string{"2382-65535"},
			},
			{
				IPProtocol: "udp",
				Ports:      []string{"1-65535"},
			},
			{
				IPProtocol: "icmp",
			},
		},
	})
	fws = append(fws, &compute.Firewall{
		Name:         clusterName + "-default-internal-node",
		SourceRanges: []string{"10.0.0.0/8"},
		TargetTags:   []string{nodeTag},
		Allowed: []*compute.FirewallAllowed{
			{
				IPProtocol: "tcp",
				Ports:      []string{"1-65535"},
			},
			{
				IPProtocol: "udp",
				Ports:      []string{"1-65535"},
			},
			{
				IPProtocol: "icmp",
			},
		},
	})
	fws = append(fws, &compute.Firewall{
		Name:         network + "-default-ssh",
		SourceRanges: []string{"0.0.0.0/0"},
		Allowed: []*compute.FirewallAllowed{
			{
				IPProtocol: "tcp",
				Ports:      []string{"22"},
			},
		},
	})
	fws = append(fws, &compute.Firewall{
		Name:       masterName + "-etcd",
		SourceTags: []string{masterTag},
		TargetTags: []string{masterTag},
		Allowed: []*compute.FirewallAllowed{
			{
				IPProtocol: "tcp",
				Ports:      []string{"2380"},
			},
			{
				IPProtocol: "tcp",
				Ports:      []string{"2381"},
			},
		},
	})
	fws = append(fws, &compute.Firewall{
		Name:         masterName + "-https",
		SourceRanges: []string{"0.0.0.0/0"},
		TargetTags:   []string{masterTag},
		Allowed: []*compute.FirewallAllowed{
			{
				IPProtocol: "tcp",
				Ports:      []string{"443"},
			},
		},
	})
	fws = append(fws, &compute.Firewall{
		Name:         nodeTag + "-all",
		SourceRanges: []string{clusterIpRange},
		TargetTags:   []string{nodeTag},
		Allowed: []*compute.FirewallAllowed{
			{
				IPProtocol: "tcp",
			},
			{
				IPProtocol: "udp",
			},
			{
				IPProtocol: "icmp",
			},
			{
				IPProtocol: "esp",
			},
			{
				IPProtocol: "ah",
			},
			{
				IPProtocol: "sctp",
			},
		},
	})
	fws = append(fws, &compute.Firewall{
		Name:         nodeTag + "-" + instancePrefix + "-http-alt",
		SourceRanges: []string{"0.0.0.0/0"},
		TargetTags:   []string{nodeTag},
		Allowed: []*compute.FirewallAllowed{
			{
				IPProtocol: "tcp",
				Ports:      []string{"80"},
			},
			{
				IPProtocol: "tcp",
				Ports:      []string{"8080"},
			},
		},
	})
	fws = append(fws, &compute.Firewall{
		Name:         nodeTag + "-" + instancePrefix + "-nodeports",
		SourceRanges: []string{"0.0.0.0/0"},
		TargetTags:   []string{nodeTag},
		Allowed: []*compute.FirewallAllowed{
			{
				IPProtocol: "tcp",
				Ports:      []string{"30000-32767"},
			},
			{
				IPProtocol: "udp",
				Ports:      []string{"30000-32767"},
			},
		},
	})
	return fws
}

// PackProtocolsPortsFromFirewall packs protocols and ports in a unified way for verification.
func PackProtocolsPortsFromFirewall(alloweds []*compute.FirewallAllowed) []string {
	protocolPorts := []string{}
	for _, allowed := range alloweds {
		for _, port := range allowed.Ports {
			protocolPorts = append(protocolPorts, strings.ToLower(allowed.IPProtocol+"/"+port))
		}
	}
	return protocolPorts
}

// SameStringArray verifies whether two string arrays have the same strings, and returns an error if not.
// Order does not matter.
// When `include` is set to true, it verifies whether result includes all elements from expected.
func SameStringArray(result, expected []string, include bool) error {
	res := sets.NewString(result...)
	exp := sets.NewString(expected...)
	if !include {
		diff := res.Difference(exp)
		if len(diff) != 0 {
			return fmt.Errorf("found differences: %v", diff)
		}
	} else {
		if !res.IsSuperset(exp) {
			return fmt.Errorf("some elements are missing: expected %v, got %v", expected, result)
		}
	}
	return nil
}

// VerifyFirewallRule verifies whether the result firewall is consistent with the expected firewall.
// When `portsSubset` is false, match given ports exactly. Otherwise, only check that the ports are included.
func VerifyFirewallRule(res, exp *compute.Firewall, network string, portsSubset bool) error {
	if res == nil || exp == nil {
		return fmt.Errorf("res and exp must not be nil")
	}
	if res.Name != exp.Name {
		return fmt.Errorf("incorrect name: %v, expected %v", res.Name, exp.Name)
	}
	// Sample Network value: https://www.googleapis.com/compute/v1/projects/{project-id}/global/networks/e2e
	if !strings.HasSuffix(res.Network, "/"+network) {
		return fmt.Errorf("incorrect network: %v, expected ends with: %v", res.Network, "/"+network)
	}
	if err := SameStringArray(PackProtocolsPortsFromFirewall(res.Allowed),
		PackProtocolsPortsFromFirewall(exp.Allowed), portsSubset); err != nil {
		return fmt.Errorf("incorrect allowed protocols ports: %v", err)
	}
	if err := SameStringArray(res.SourceRanges, exp.SourceRanges, false); err != nil {
		return fmt.Errorf("incorrect source ranges %v, expected %v: %v", res.SourceRanges, exp.SourceRanges, err)
	}
	if err := SameStringArray(res.SourceTags, exp.SourceTags, false); err != nil {
		return fmt.Errorf("incorrect source tags %v, expected %v: %v", res.SourceTags, exp.SourceTags, err)
	}
	if err := SameStringArray(res.TargetTags, exp.TargetTags, false); err != nil {
		return fmt.Errorf("incorrect target tags %v, expected %v: %v", res.TargetTags, exp.TargetTags, err)
	}
	return nil
}

// WaitForFirewallRule waits until the named GCE firewall exists (or no longer exists), polling every 5 seconds up to the timeout.
func WaitForFirewallRule(gceCloud *gcecloud.GCECloud, fwName string, exist bool, timeout time.Duration) (*compute.Firewall, error) {
	Logf("Waiting up to %v for firewall %v exist=%v", timeout, fwName, exist)
	var fw *compute.Firewall
	var err error

	condition := func() (bool, error) {
		fw, err = gceCloud.GetFirewall(fwName)
		if err != nil && exist ||
			err == nil && !exist ||
			err != nil && !exist && !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
			return false, nil
		}
		return true, nil
	}

	if err := wait.PollImmediate(5*time.Second, timeout, condition); err != nil {
		return nil, fmt.Errorf("error waiting for firewall %v exist=%v", fwName, exist)
	}
	return fw, nil
}

// GetClusterID returns the cluster (or provider) ID stored in the GCE UID ConfigMap in kube-system.
func GetClusterID(c clientset.Interface) (string, error) {
	cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{})
	if err != nil || cm == nil {
		return "", fmt.Errorf("error getting cluster ID: %v", err)
	}
	clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster]
	providerID, providerIDExists := cm.Data[gcecloud.UIDProvider]
	if !clusterIDExists {
		return "", fmt.Errorf("cluster ID not set")
	}
	if providerIDExists {
		return providerID, nil
	}
	return clusterID, nil
}
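
Editor's note (not part of the vendored file): a minimal sketch of checking the firewall GCE created for a LoadBalancer service against the expected rule built by these helpers; the node tag and network name are illustrative placeholders.

// Sketch only: construct the expected firewall, wait for the real one to
// appear, then verify it matches.
package e2eexample

import (
	"k8s.io/api/core/v1"
	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
	"k8s.io/kubernetes/test/e2e/framework"
)

func checkLBFirewall(gce *gcecloud.GCECloud, svc *v1.Service) error {
	exp := framework.ConstructFirewallForLBService(svc, "example-node-tag")
	fw, err := framework.WaitForFirewallRule(gce, exp.Name, true, framework.FirewallTimeoutDefault)
	if err != nil {
		return err
	}
	return framework.VerifyFirewallRule(fw, exp, "example-network", false)
}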

vendor/k8s.io/kubernetes/test/e2e/framework/framework.go (generated, vendored, new file, 835 lines; listing truncated below)
@@ -0,0 +1,835 @@
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/kubemark"
|
||||
"k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
maxKubectlExecRetries = 5
|
||||
// TODO(mikedanese): reset this to 5 minutes once #47135 is resolved.
|
||||
// ref https://github.com/kubernetes/kubernetes/issues/47135
|
||||
DefaultNamespaceDeletionTimeout = 10 * time.Minute
|
||||
)
|
||||
|
||||
// Framework supports common operations used by e2e tests; it will keep a client & a namespace for you.
|
||||
// Eventual goal is to merge this with integration test framework.
|
||||
type Framework struct {
|
||||
BaseName string
|
||||
|
||||
ClientSet clientset.Interface
|
||||
KubemarkExternalClusterClientSet clientset.Interface
|
||||
|
||||
InternalClientset *internalclientset.Clientset
|
||||
AggregatorClient *aggregatorclient.Clientset
|
||||
ClientPool dynamic.ClientPool
|
||||
|
||||
SkipNamespaceCreation bool // Whether to skip creating a namespace
|
||||
Namespace *v1.Namespace // Every test has at least one namespace unless creation is skipped
|
||||
namespacesToDelete []*v1.Namespace // Some tests have more than one.
|
||||
NamespaceDeletionTimeout time.Duration
|
||||
SkipPrivilegedPSPBinding bool // Whether to skip creating a binding to the privileged PSP in the test namespace
|
||||
|
||||
gatherer *containerResourceGatherer
|
||||
// Constraints that passed to a check which is executed after data is gathered to
|
||||
// see if 99% of results are within acceptable bounds. It has to be injected in the test,
|
||||
// as expectations vary greatly. Constraints are grouped by the container names.
|
||||
AddonResourceConstraints map[string]ResourceConstraint
|
||||
|
||||
logsSizeWaitGroup sync.WaitGroup
|
||||
logsSizeCloseChannel chan bool
|
||||
logsSizeVerifier *LogsSizeVerifier
|
||||
|
||||
// To make sure that this framework cleans up after itself, no matter what,
|
||||
// we install a Cleanup action before each test and clear it after. If we
|
||||
// should abort, the AfterSuite hook should run all Cleanup actions.
|
||||
cleanupHandle CleanupActionHandle
|
||||
|
||||
// configuration for framework's client
|
||||
Options FrameworkOptions
|
||||
|
||||
// Place where various additional data is stored during test run to be printed to ReportDir,
|
||||
// or stdout if ReportDir is not set once test ends.
|
||||
TestSummaries []TestDataSummary
|
||||
|
||||
kubemarkControllerCloseChannel chan struct{}
|
||||
|
||||
// Place to keep ClusterAutoscaler metrics from before test in order to compute delta.
|
||||
clusterAutoscalerMetricsBeforeTest metrics.MetricsCollection
|
||||
}
|
||||
|
||||
type TestDataSummary interface {
|
||||
SummaryKind() string
|
||||
PrintHumanReadable() string
|
||||
PrintJSON() string
|
||||
}
|
||||
|
||||
type FrameworkOptions struct {
|
||||
ClientQPS float32
|
||||
ClientBurst int
|
||||
GroupVersion *schema.GroupVersion
|
||||
}
|
||||
|
||||
// NewDefaultFramework makes a new framework with default options and sets up a BeforeEach/AfterEach for
|
||||
// you (you can write additional before/after each functions).
|
||||
func NewDefaultFramework(baseName string) *Framework {
|
||||
options := FrameworkOptions{
|
||||
ClientQPS: 20,
|
||||
ClientBurst: 50,
|
||||
}
|
||||
return NewFramework(baseName, options, nil)
|
||||
}
|
||||
|
||||
func NewFramework(baseName string, options FrameworkOptions, client clientset.Interface) *Framework {
|
||||
f := &Framework{
|
||||
BaseName: baseName,
|
||||
AddonResourceConstraints: make(map[string]ResourceConstraint),
|
||||
Options: options,
|
||||
ClientSet: client,
|
||||
}
|
||||
|
||||
BeforeEach(f.BeforeEach)
|
||||
AfterEach(f.AfterEach)
|
||||
|
||||
return f
|
||||
}
|
||||
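// Illustrative sketch (not part of the upstream file): a typical e2e spec consumes
// NewDefaultFramework as in the hypothetical example below. The framework registers its own
// BeforeEach/AfterEach, so the spec body only uses the client and namespace it exposes.
//
//	var _ = KubeDescribe("example", func() {
//		f := NewDefaultFramework("example")
//		It("lists pods in the test namespace", func() {
//			pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
//			Expect(err).NotTo(HaveOccurred())
//			Logf("found %d pods in %q", len(pods.Items), f.Namespace.Name)
//		})
//	})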
|
||||
// BeforeEach gets a client and makes a namespace.
|
||||
func (f *Framework) BeforeEach() {
|
||||
// The fact that we need this feels like a bug in ginkgo.
|
||||
// https://github.com/onsi/ginkgo/issues/222
|
||||
f.cleanupHandle = AddCleanupAction(f.AfterEach)
|
||||
if f.ClientSet == nil {
|
||||
By("Creating a kubernetes client")
|
||||
config, err := LoadConfig()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
config.QPS = f.Options.ClientQPS
|
||||
config.Burst = f.Options.ClientBurst
|
||||
if f.Options.GroupVersion != nil {
|
||||
config.GroupVersion = f.Options.GroupVersion
|
||||
}
|
||||
if TestContext.KubeAPIContentType != "" {
|
||||
config.ContentType = TestContext.KubeAPIContentType
|
||||
}
|
||||
f.ClientSet, err = clientset.NewForConfig(config)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
f.InternalClientset, err = internalclientset.NewForConfig(config)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
f.AggregatorClient, err = aggregatorclient.NewForConfig(config)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
f.ClientPool = dynamic.NewClientPool(config, legacyscheme.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
|
||||
if ProviderIs("kubemark") && TestContext.KubemarkExternalKubeConfig != "" && TestContext.CloudConfig.KubemarkController == nil {
|
||||
externalConfig, err := clientcmd.BuildConfigFromFlags("", TestContext.KubemarkExternalKubeConfig)
|
||||
externalConfig.QPS = f.Options.ClientQPS
|
||||
externalConfig.Burst = f.Options.ClientBurst
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
externalClient, err := clientset.NewForConfig(externalConfig)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
f.KubemarkExternalClusterClientSet = externalClient
|
||||
f.kubemarkControllerCloseChannel = make(chan struct{})
|
||||
externalInformerFactory := informers.NewSharedInformerFactory(externalClient, 0)
|
||||
kubemarkInformerFactory := informers.NewSharedInformerFactory(f.ClientSet, 0)
|
||||
kubemarkNodeInformer := kubemarkInformerFactory.Core().V1().Nodes()
|
||||
go kubemarkNodeInformer.Informer().Run(f.kubemarkControllerCloseChannel)
|
||||
TestContext.CloudConfig.KubemarkController, err = kubemark.NewKubemarkController(f.KubemarkExternalClusterClientSet, externalInformerFactory, f.ClientSet, kubemarkNodeInformer)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
externalInformerFactory.Start(f.kubemarkControllerCloseChannel)
|
||||
Expect(TestContext.CloudConfig.KubemarkController.WaitForCacheSync(f.kubemarkControllerCloseChannel)).To(BeTrue())
|
||||
go TestContext.CloudConfig.KubemarkController.Run(f.kubemarkControllerCloseChannel)
|
||||
}
|
||||
}
|
||||
|
||||
if !f.SkipNamespaceCreation {
|
||||
By("Building a namespace api object")
|
||||
namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
|
||||
"e2e-framework": f.BaseName,
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
f.Namespace = namespace
|
||||
|
||||
if TestContext.VerifyServiceAccount {
|
||||
By("Waiting for a default service account to be provisioned in namespace")
|
||||
err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
} else {
|
||||
Logf("Skipping waiting for service account")
|
||||
}
|
||||
}
|
||||
|
||||
if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
|
||||
var err error
|
||||
f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{
|
||||
InKubemark: ProviderIs("kubemark"),
|
||||
MasterOnly: TestContext.GatherKubeSystemResourceUsageData == "master",
|
||||
ResourceDataGatheringPeriod: 60 * time.Second,
|
||||
ProbeDuration: 15 * time.Second,
|
||||
PrintVerboseLogs: false,
|
||||
}, nil)
|
||||
if err != nil {
|
||||
Logf("Error while creating NewResourceUsageGatherer: %v", err)
|
||||
} else {
|
||||
go f.gatherer.StartGatheringData()
|
||||
}
|
||||
}
|
||||
|
||||
if TestContext.GatherLogsSizes {
|
||||
f.logsSizeWaitGroup = sync.WaitGroup{}
|
||||
f.logsSizeWaitGroup.Add(1)
|
||||
f.logsSizeCloseChannel = make(chan bool)
|
||||
f.logsSizeVerifier = NewLogsVerifier(f.ClientSet, f.logsSizeCloseChannel)
|
||||
go func() {
|
||||
f.logsSizeVerifier.Run()
|
||||
f.logsSizeWaitGroup.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
gatherMetricsAfterTest := TestContext.GatherMetricsAfterTest == "true" || TestContext.GatherMetricsAfterTest == "master"
|
||||
if gatherMetricsAfterTest && TestContext.IncludeClusterAutoscalerMetrics {
|
||||
grabber, err := metrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, !ProviderIs("kubemark"), false, false, false, TestContext.IncludeClusterAutoscalerMetrics)
|
||||
if err != nil {
|
||||
Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
|
||||
} else {
|
||||
f.clusterAutoscalerMetricsBeforeTest, err = grabber.Grab()
|
||||
if err != nil {
|
||||
Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
|
||||
} else {
|
||||
Logf("Gathered ClusterAutoscaler metrics before test")
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// AfterEach deletes the namespace, after reading its events.
|
||||
func (f *Framework) AfterEach() {
|
||||
RemoveCleanupAction(f.cleanupHandle)
|
||||
|
||||
// DeleteNamespace at the very end in defer, to avoid any
|
||||
// expectation failures preventing deleting the namespace.
|
||||
defer func() {
|
||||
nsDeletionErrors := map[string]error{}
|
||||
// Whether to delete namespace is determined by 3 factors: delete-namespace flag, delete-namespace-on-failure flag and the test result
|
||||
// if delete-namespace is set to false, the namespace will always be preserved.
|
||||
// if delete-namespace is true and delete-namespace-on-failure is false, namespace will be preserved if test failed.
|
||||
if TestContext.DeleteNamespace && (TestContext.DeleteNamespaceOnFailure || !CurrentGinkgoTestDescription().Failed) {
|
||||
for _, ns := range f.namespacesToDelete {
|
||||
By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name))
|
||||
timeout := DefaultNamespaceDeletionTimeout
|
||||
if f.NamespaceDeletionTimeout != 0 {
|
||||
timeout = f.NamespaceDeletionTimeout
|
||||
}
|
||||
if err := deleteNS(f.ClientSet, f.ClientPool, ns.Name, timeout); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
nsDeletionErrors[ns.Name] = err
|
||||
} else {
|
||||
Logf("Namespace %v was already deleted", ns.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if !TestContext.DeleteNamespace {
|
||||
Logf("Found DeleteNamespace=false, skipping namespace deletion!")
|
||||
} else {
|
||||
Logf("Found DeleteNamespaceOnFailure=false and current test failed, skipping namespace deletion!")
|
||||
}
|
||||
}
|
||||
|
||||
// Paranoia-- prevent reuse!
|
||||
f.Namespace = nil
|
||||
f.ClientSet = nil
|
||||
f.namespacesToDelete = nil
|
||||
|
||||
// if we had errors deleting, report them now.
|
||||
if len(nsDeletionErrors) != 0 {
|
||||
messages := []string{}
|
||||
for namespaceKey, namespaceErr := range nsDeletionErrors {
|
||||
messages = append(messages, fmt.Sprintf("Couldn't delete ns: %q: %s (%#v)", namespaceKey, namespaceErr, namespaceErr))
|
||||
}
|
||||
Failf(strings.Join(messages, ","))
|
||||
}
|
||||
}()
|
||||
|
||||
// Print events if the test failed.
|
||||
if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
|
||||
// Pass both unversioned client and versioned clientset, until we have removed all uses of the unversioned client.
|
||||
if !f.SkipNamespaceCreation {
|
||||
DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
|
||||
}
|
||||
|
||||
logFunc := Logf
|
||||
if TestContext.ReportDir != "" {
|
||||
filePath := path.Join(TestContext.ReportDir, "image-puller.txt")
|
||||
file, err := os.Create(filePath)
|
||||
if err != nil {
|
||||
By(fmt.Sprintf("Failed to create a file with image-puller data %v: %v\nPrinting to stdout", filePath, err))
|
||||
} else {
|
||||
By(fmt.Sprintf("Dumping a list of prepulled images on each node to file %v", filePath))
|
||||
defer file.Close()
|
||||
if err = file.Chmod(0644); err != nil {
|
||||
Logf("Failed to chmod to 644 of %v: %v", filePath, err)
|
||||
}
|
||||
logFunc = GetLogToFileFunc(file)
|
||||
}
|
||||
} else {
|
||||
By("Dumping a list of prepulled images on each node...")
|
||||
}
|
||||
LogContainersInPodsWithLabels(f.ClientSet, metav1.NamespaceSystem, ImagePullerLabels, "image-puller", logFunc)
|
||||
}
|
||||
|
||||
if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {
|
||||
By("Collecting resource usage data")
|
||||
summary, resourceViolationError := f.gatherer.StopAndSummarize([]int{90, 99, 100}, f.AddonResourceConstraints)
|
||||
defer ExpectNoError(resourceViolationError)
|
||||
f.TestSummaries = append(f.TestSummaries, summary)
|
||||
}
|
||||
|
||||
if TestContext.GatherLogsSizes {
|
||||
By("Gathering log sizes data")
|
||||
close(f.logsSizeCloseChannel)
|
||||
f.logsSizeWaitGroup.Wait()
|
||||
f.TestSummaries = append(f.TestSummaries, f.logsSizeVerifier.GetSummary())
|
||||
}
|
||||
|
||||
if TestContext.GatherMetricsAfterTest != "false" {
|
||||
By("Gathering metrics")
|
||||
// Grab apiserver, scheduler, controller-manager metrics and (optionally) nodes' kubelet metrics.
|
||||
grabMetricsFromKubelets := TestContext.GatherMetricsAfterTest != "master" && !ProviderIs("kubemark")
|
||||
grabber, err := metrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, grabMetricsFromKubelets, true, true, true, TestContext.IncludeClusterAutoscalerMetrics)
|
||||
if err != nil {
|
||||
Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
|
||||
} else {
|
||||
received, err := grabber.Grab()
|
||||
if err != nil {
|
||||
Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
|
||||
}
|
||||
(*MetricsForE2E)(&received).computeClusterAutoscalerMetricsDelta(f.clusterAutoscalerMetricsBeforeTest)
|
||||
f.TestSummaries = append(f.TestSummaries, (*MetricsForE2E)(&received))
|
||||
}
|
||||
}
|
||||
|
||||
if TestContext.CloudConfig.KubemarkController != nil {
|
||||
close(f.kubemarkControllerCloseChannel)
|
||||
}
|
||||
|
||||
PrintSummaries(f.TestSummaries, f.BaseName)
|
||||
|
||||
// Check whether all nodes are ready after the test.
|
||||
// This is explicitly done at the very end of the test, to avoid
|
||||
// e.g. not removing namespace in case of this failure.
|
||||
if err := AllNodesReady(f.ClientSet, 3*time.Minute); err != nil {
|
||||
Failf("All nodes should be ready after test, %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*v1.Namespace, error) {
|
||||
createTestingNS := TestContext.CreateTestingNS
|
||||
if createTestingNS == nil {
|
||||
createTestingNS = CreateTestingNS
|
||||
}
|
||||
ns, err := createTestingNS(baseName, f.ClientSet, labels)
|
||||
// check ns instead of err to see if it's nil as we may
|
||||
// fail to create serviceAccount in it.
|
||||
// In this case, we should not forget to delete the namespace.
|
||||
if ns != nil {
|
||||
f.namespacesToDelete = append(f.namespacesToDelete, ns)
|
||||
}
|
||||
|
||||
if !f.SkipPrivilegedPSPBinding {
|
||||
CreatePrivilegedPSPBinding(f, ns.Name)
|
||||
}
|
||||
|
||||
return ns, err
|
||||
}
|
||||
|
||||
// WaitForPodTerminated waits for the pod to be terminated with the given reason.
|
||||
func (f *Framework) WaitForPodTerminated(podName, reason string) error {
|
||||
return waitForPodTerminatedInNamespace(f.ClientSet, podName, reason, f.Namespace.Name)
|
||||
}
|
||||
|
||||
// WaitForPodNotFound waits for the pod to be completely terminated (not "Get-able").
|
||||
func (f *Framework) WaitForPodNotFound(podName string, timeout time.Duration) error {
|
||||
return waitForPodNotFoundInNamespace(f.ClientSet, podName, f.Namespace.Name, timeout)
|
||||
}
|
||||
|
||||
// WaitForPodRunning waits for the pod to run in the namespace.
|
||||
func (f *Framework) WaitForPodRunning(podName string) error {
|
||||
return WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
|
||||
}
|
||||
|
||||
// WaitForPodReady waits for the pod to flip to ready in the namespace.
|
||||
func (f *Framework) WaitForPodReady(podName string) error {
|
||||
return waitTimeoutForPodReadyInNamespace(f.ClientSet, podName, f.Namespace.Name, PodStartTimeout)
|
||||
}
|
||||
|
||||
// WaitForPodRunningSlow waits for the pod to run in the namespace.
|
||||
// It has a longer timeout than WaitForPodRunning (util.slowPodStartTimeout).
|
||||
func (f *Framework) WaitForPodRunningSlow(podName string) error {
|
||||
return waitForPodRunningInNamespaceSlow(f.ClientSet, podName, f.Namespace.Name)
|
||||
}
|
||||
|
||||
// WaitForPodNoLongerRunning waits for the pod to no longer be running in the namespace, for either
|
||||
// success or failure.
|
||||
func (f *Framework) WaitForPodNoLongerRunning(podName string) error {
|
||||
return WaitForPodNoLongerRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
|
||||
}
|
||||
|
||||
// TestContainerOutput runs the given pod in the given namespace and waits
|
||||
// for all of the containers in the podSpec to move into the 'Success' status, and tests
|
||||
// the specified container log against the given expected output using a substring matcher.
|
||||
func (f *Framework) TestContainerOutput(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
|
||||
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, ContainSubstring)
|
||||
}
|
||||
|
||||
// TestContainerOutputRegexp runs the given pod in the given namespace and waits
|
||||
// for all of the containers in the podSpec to move into the 'Success' status, and tests
|
||||
// the specified container log against the given expected output using a regexp matcher.
|
||||
func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
|
||||
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, MatchRegexp)
|
||||
}
|
||||
|
||||
// Write a file using kubectl exec echo <contents> > <path> via specified container
|
||||
// Because of the primitive technique we're using here, we only allow ASCII alphanumeric characters
|
||||
func (f *Framework) WriteFileViaContainer(podName, containerName string, path string, contents string) error {
|
||||
By("writing a file in the container")
|
||||
allowedCharacters := "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
for _, c := range contents {
|
||||
if !strings.ContainsRune(allowedCharacters, c) {
|
||||
return fmt.Errorf("Unsupported character in string to write: %v", c)
|
||||
}
|
||||
}
|
||||
command := fmt.Sprintf("echo '%s' > '%s'", contents, path)
|
||||
stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "/bin/sh", "-c", command)
|
||||
if err != nil {
|
||||
Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Read a file using kubectl exec cat <path>
|
||||
func (f *Framework) ReadFileViaContainer(podName, containerName string, path string) (string, error) {
|
||||
By("reading a file in the container")
|
||||
|
||||
stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "cat", path)
|
||||
if err != nil {
|
||||
Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
|
||||
}
|
||||
return string(stdout), err
|
||||
}
|
||||
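// Illustrative sketch (not part of the upstream file): round-tripping a small token through a
// running container with the two helpers above. The pod and container names are hypothetical,
// and the contents must stay ASCII alphanumeric because of WriteFileViaContainer's restriction.
//
//	ExpectNoError(f.WriteFileViaContainer("test-pod", "test-ctr", "/tmp/token", "abc123"))
//	out, err := f.ReadFileViaContainer("test-pod", "test-ctr", "/tmp/token")
//	ExpectNoError(err)
//	Logf("read back %q", out)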
|
||||
func (f *Framework) CheckFileSizeViaContainer(podName, containerName, path string) (string, error) {
|
||||
By("checking a file size in the container")
|
||||
|
||||
stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "ls", "-l", path)
|
||||
if err != nil {
|
||||
Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
|
||||
}
|
||||
return string(stdout), err
|
||||
}
|
||||
|
||||
// CreateServiceForSimpleAppWithPods is a convenience wrapper to create a service and its matching pods all at once.
|
||||
func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int, appName string, podSpec func(n v1.Node) v1.PodSpec, count int, block bool) (error, *v1.Service) {
|
||||
var err error = nil
|
||||
theService := f.CreateServiceForSimpleApp(contPort, svcPort, appName)
|
||||
f.CreatePodsPerNodeForSimpleApp(appName, podSpec, count)
|
||||
if block {
|
||||
err = testutils.WaitForPodsWithLabelRunning(f.ClientSet, f.Namespace.Name, labels.SelectorFromSet(labels.Set(theService.Spec.Selector)))
|
||||
}
|
||||
return err, theService
|
||||
}
|
||||
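// Illustrative sketch (not part of the upstream file): wiring up a service plus per-node pods
// with the helper above. The pod spec, app name and image are hypothetical; note the
// (error, *v1.Service) return order, and that block=true waits for the labelled pods to run.
//
//	podSpec := func(n v1.Node) v1.PodSpec {
//		return v1.PodSpec{
//			NodeName:   n.Name,
//			Containers: []v1.Container{{Name: "demo", Image: "k8s.gcr.io/pause:3.1"}}, // any small image
//		}
//	}
//	err, svc := f.CreateServiceForSimpleAppWithPods(8080, 80, "demo", podSpec, 3, true)
//	ExpectNoError(err)
//	Logf("service %v is selecting the demo pods", svc.Name)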
|
||||
// CreateServiceForSimpleApp returns a service that selects/exposes pods (send -1 ports if no exposure needed) with an app label.
|
||||
func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName string) *v1.Service {
|
||||
if appName == "" {
|
||||
panic(fmt.Sprintf("no app name provided"))
|
||||
}
|
||||
|
||||
serviceSelector := map[string]string{
|
||||
"app": appName + "-pod",
|
||||
}
|
||||
|
||||
// For convenience, the user-supplied ports are optional.
|
||||
portsFunc := func() []v1.ServicePort {
|
||||
if contPort < 1 || svcPort < 1 {
|
||||
return nil
|
||||
} else {
|
||||
return []v1.ServicePort{{
|
||||
Protocol: "TCP",
|
||||
Port: int32(svcPort),
|
||||
TargetPort: intstr.FromInt(contPort),
|
||||
}}
|
||||
}
|
||||
}
|
||||
Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
|
||||
service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "service-for-" + appName,
|
||||
Labels: map[string]string{
|
||||
"app": appName + "-service",
|
||||
},
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Ports: portsFunc(),
|
||||
Selector: serviceSelector,
|
||||
},
|
||||
})
|
||||
ExpectNoError(err)
|
||||
return service
|
||||
}
|
||||
|
||||
// CreatePodsPerNodeForSimpleApp creates one labelled pod per ready node (bounded by maxCount). Useful for tests which make a bunch of pods without any networking.
|
||||
func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
|
||||
nodes := GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
labels := map[string]string{
|
||||
"app": appName + "-pod",
|
||||
}
|
||||
for i, node := range nodes.Items {
|
||||
// one per node, but no more than maxCount.
|
||||
if i <= maxCount {
|
||||
Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
|
||||
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf(appName+"-pod-%v", i),
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: podSpec(node),
|
||||
})
|
||||
ExpectNoError(err)
|
||||
}
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
type KubeUser struct {
|
||||
Name string `yaml:"name"`
|
||||
User struct {
|
||||
Username string `yaml:"username"`
|
||||
Password string `yaml:"password"`
|
||||
Token string `yaml:"token"`
|
||||
} `yaml:"user"`
|
||||
}
|
||||
|
||||
type KubeCluster struct {
|
||||
Name string `yaml:"name"`
|
||||
Cluster struct {
|
||||
CertificateAuthorityData string `yaml:"certificate-authority-data"`
|
||||
Server string `yaml:"server"`
|
||||
} `yaml:"cluster"`
|
||||
}
|
||||
|
||||
type KubeConfig struct {
|
||||
Contexts []struct {
|
||||
Name string `yaml:"name"`
|
||||
Context struct {
|
||||
Cluster string `yaml:"cluster"`
|
||||
User string
|
||||
} `yaml:"context"`
|
||||
} `yaml:"contexts"`
|
||||
|
||||
Clusters []KubeCluster `yaml:"clusters"`
|
||||
|
||||
Users []KubeUser `yaml:"users"`
|
||||
}
|
||||
|
||||
func (kc *KubeConfig) FindUser(name string) *KubeUser {
|
||||
for _, user := range kc.Users {
|
||||
if user.Name == name {
|
||||
return &user
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
|
||||
for _, cluster := range kc.Clusters {
|
||||
if cluster.Name == name {
|
||||
return &cluster
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
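// Illustrative sketch (not part of the upstream file): the yaml tags above are intended for
// decoding a kubeconfig file with a YAML library. The ioutil/yaml calls and the "admin" user
// name below are assumptions, not imports or fixtures of this file.
//
//	raw, err := ioutil.ReadFile(clientcmd.RecommendedHomeFile)
//	ExpectNoError(err)
//	var kc KubeConfig
//	ExpectNoError(yaml.Unmarshal(raw, &kc)) // e.g. gopkg.in/yaml.v2
//	if u := kc.FindUser("admin"); u != nil {
//		Logf("user %q has a token: %v", u.Name, u.User.Token != "")
//	}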
|
||||
func kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
|
||||
for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ {
|
||||
if numRetries > 0 {
|
||||
Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
|
||||
}
|
||||
|
||||
stdOutBytes, stdErrBytes, err := kubectlExec(namespace, podName, containerName, args...)
|
||||
if err != nil {
|
||||
if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") {
|
||||
// Retry on "i/o timeout" errors
|
||||
Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
|
||||
continue
|
||||
}
|
||||
if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") {
|
||||
// Retry on "container not found" errors
|
||||
Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
|
||||
time.Sleep(2 * time.Second)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
return stdOutBytes, stdErrBytes, err
|
||||
}
|
||||
err := fmt.Errorf("Failed: kubectl exec failed %d times with \"i/o timeout\". Giving up.", maxKubectlExecRetries)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
func kubectlExec(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmdArgs := []string{
|
||||
"exec",
|
||||
fmt.Sprintf("--namespace=%v", namespace),
|
||||
podName,
|
||||
fmt.Sprintf("-c=%v", containerName),
|
||||
}
|
||||
cmdArgs = append(cmdArgs, args...)
|
||||
|
||||
cmd := KubectlCmd(cmdArgs...)
|
||||
cmd.Stdout, cmd.Stderr = &stdout, &stderr
|
||||
|
||||
Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
|
||||
err := cmd.Run()
|
||||
return stdout.Bytes(), stderr.Bytes(), err
|
||||
}
|
||||
|
||||
// Wrapper function for ginkgo describe. Adds namespacing.
|
||||
// TODO: Support type safe tagging as well https://github.com/kubernetes/kubernetes/pull/22401.
|
||||
func KubeDescribe(text string, body func()) bool {
|
||||
return Describe("[k8s.io] "+text, body)
|
||||
}
|
||||
|
||||
// Wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier.
|
||||
func ConformanceIt(text string, body interface{}, timeout ...float64) bool {
|
||||
return It(text+" [Conformance]", body, timeout...)
|
||||
}
|
||||
|
||||
// PodStateVerification represents a verification of pod state.
|
||||
// Any time you have a set of pods that you want to operate against or query,
|
||||
// this struct can be used to declaratively identify those pods.
|
||||
type PodStateVerification struct {
|
||||
// Optional: only pods that have k=v labels will pass this filter.
|
||||
Selectors map[string]string
|
||||
|
||||
// Required: The phases which are valid for your pod.
|
||||
ValidPhases []v1.PodPhase
|
||||
|
||||
// Optional: only pods passing this function will pass the filter
|
||||
// Verify a pod.
|
||||
// As an optimization, in addition to specifying a filter (boolean),
|
||||
// this function allows specifying an error as well.
|
||||
// The error indicates that the polling of the pod spectrum should stop.
|
||||
Verify func(v1.Pod) (bool, error)
|
||||
|
||||
// Optional: only pods with this name will pass the filter.
|
||||
PodName string
|
||||
}
|
||||
|
||||
type ClusterVerification struct {
|
||||
client clientset.Interface
|
||||
namespace *v1.Namespace // pointer rather than string, since ns isn't created until before each.
|
||||
podState PodStateVerification
|
||||
}
|
||||
|
||||
func (f *Framework) NewClusterVerification(namespace *v1.Namespace, filter PodStateVerification) *ClusterVerification {
|
||||
return &ClusterVerification{
|
||||
f.ClientSet,
|
||||
namespace,
|
||||
filter,
|
||||
}
|
||||
}
|
||||
|
||||
func passesPodNameFilter(pod v1.Pod, name string) bool {
|
||||
return name == "" || strings.Contains(pod.Name, name)
|
||||
}
|
||||
|
||||
func passesVerifyFilter(pod v1.Pod, verify func(p v1.Pod) (bool, error)) (bool, error) {
|
||||
if verify == nil {
|
||||
return true, nil
|
||||
} else {
|
||||
verified, err := verify(pod)
|
||||
// If an error is returned, by definition, pod verification fails
|
||||
if err != nil {
|
||||
return false, err
|
||||
} else {
|
||||
return verified, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func passesPhasesFilter(pod v1.Pod, validPhases []v1.PodPhase) bool {
|
||||
passesPhaseFilter := false
|
||||
for _, phase := range validPhases {
|
||||
if pod.Status.Phase == phase {
|
||||
passesPhaseFilter = true
|
||||
}
|
||||
}
|
||||
return passesPhaseFilter
|
||||
}
|
||||
|
||||
// filterLabels returns the pods in the namespace that match the given label selectors (all pods if no selectors are given).
|
||||
func filterLabels(selectors map[string]string, cli clientset.Interface, ns string) (*v1.PodList, error) {
|
||||
var err error
|
||||
var selector labels.Selector
|
||||
var pl *v1.PodList
|
||||
// List pods based on selectors. This might be a tiny optimization rather than filtering
|
||||
// everything manually.
|
||||
if len(selectors) > 0 {
|
||||
selector = labels.SelectorFromSet(labels.Set(selectors))
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
pl, err = cli.CoreV1().Pods(ns).List(options)
|
||||
} else {
|
||||
pl, err = cli.CoreV1().Pods(ns).List(metav1.ListOptions{})
|
||||
}
|
||||
return pl, err
|
||||
}
|
||||
|
||||
// filter filters pods which pass a filter. It can be used to compose
|
||||
// the more useful abstractions like ForEach, WaitFor, and so on, which
|
||||
// can be used directly by tests.
|
||||
func (p *PodStateVerification) filter(c clientset.Interface, namespace *v1.Namespace) ([]v1.Pod, error) {
|
||||
if len(p.ValidPhases) == 0 || namespace == nil {
|
||||
panic(fmt.Errorf("Need to specify a valid pod phases (%v) and namespace (%v). ", p.ValidPhases, namespace))
|
||||
}
|
||||
|
||||
ns := namespace.Name
|
||||
pl, err := filterLabels(p.Selectors, c, ns) // Build an v1.PodList to operate against.
|
||||
Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
|
||||
if len(pl.Items) == 0 || err != nil {
|
||||
return pl.Items, err
|
||||
}
|
||||
|
||||
unfilteredPods := pl.Items
|
||||
filteredPods := []v1.Pod{}
|
||||
ReturnPodsSoFar:
|
||||
// Next: Pod must match at least one of the states that the user specified
|
||||
for _, pod := range unfilteredPods {
|
||||
if !(passesPhasesFilter(pod, p.ValidPhases) && passesPodNameFilter(pod, p.PodName)) {
|
||||
continue
|
||||
}
|
||||
passesVerify, err := passesVerifyFilter(pod, p.Verify)
|
||||
if err != nil {
|
||||
Logf("Error detected on %v : %v !", pod.Name, err)
|
||||
break ReturnPodsSoFar
|
||||
}
|
||||
if passesVerify {
|
||||
filteredPods = append(filteredPods, pod)
|
||||
}
|
||||
}
|
||||
return filteredPods, err
|
||||
}
|
||||
|
||||
// WaitFor waits for some minimum number of pods to be verified, according to the PodStateVerification
|
||||
// definition.
|
||||
func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1.Pod, error) {
|
||||
pods := []v1.Pod{}
|
||||
var returnedErr error
|
||||
|
||||
err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
|
||||
pods, returnedErr = cl.podState.filter(cl.client, cl.namespace)
|
||||
|
||||
// Failure
|
||||
if returnedErr != nil {
|
||||
Logf("Cutting polling short: We got an error from the pod filtering layer.")
|
||||
// stop polling if the pod filtering returns an error. that should never happen.
|
||||
// it indicates, for example, that the client is broken or something non-pod related.
|
||||
return false, returnedErr
|
||||
}
|
||||
Logf("Found %v / %v", len(pods), atLeast)
|
||||
|
||||
// Success
|
||||
if len(pods) >= atLeast {
|
||||
return true, nil
|
||||
}
|
||||
// Keep trying...
|
||||
return false, nil
|
||||
})
|
||||
Logf("WaitFor completed with timeout %v. Pods found = %v out of %v", timeout, len(pods), atLeast)
|
||||
return pods, err
|
||||
}
|
||||
|
||||
// WaitForOrFail provides a shorthand WaitFor with failure as an option if anything goes wrong.
|
||||
func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration) {
|
||||
pods, err := cl.WaitFor(atLeast, timeout)
|
||||
if err != nil || len(pods) < atLeast {
|
||||
Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
|
||||
}
|
||||
}
|
||||
|
||||
// ForEach runs a function against every verifiable pod. Be warned that this doesn't wait for "n" pods to verify,
|
||||
// so it may return very quickly if you have strict pod state requirements.
|
||||
//
|
||||
// For example, if you require at least 5 pods to be running before your test will pass,
|
||||
// it's smart to first call "clusterVerification.WaitFor(5)" before you call clusterVerification.ForEach.
|
||||
func (cl *ClusterVerification) ForEach(podFunc func(v1.Pod)) error {
|
||||
pods, err := cl.podState.filter(cl.client, cl.namespace)
|
||||
if err == nil {
|
||||
if len(pods) == 0 {
|
||||
Failf("No pods matched the filter.")
|
||||
}
|
||||
Logf("ForEach: Found %v pods from the filter. Now looping through them.", len(pods))
|
||||
for _, p := range pods {
|
||||
podFunc(p)
|
||||
}
|
||||
} else {
|
||||
Logf("ForEach: Something went wrong when filtering pods to execute against: %v", err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
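// Illustrative sketch (not part of the upstream file): a typical ClusterVerification flow.
// The selector and counts are hypothetical; WaitForOrFail blocks until enough pods pass the
// filter, after which ForEach visits whatever currently matches it.
//
//	cv := f.NewClusterVerification(f.Namespace, PodStateVerification{
//		Selectors:   map[string]string{"app": "demo-pod"},
//		ValidPhases: []v1.PodPhase{v1.PodRunning},
//	})
//	cv.WaitForOrFail(3, 2*time.Minute)
//	ExpectNoError(cv.ForEach(func(p v1.Pod) { Logf("verified %v on %v", p.Name, p.Spec.NodeName) }))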
|
||||
// GetLogToFileFunc is a convenience function that returns a function that has the same interface as
|
||||
// Logf, but writes to a specified file.
|
||||
func GetLogToFileFunc(file *os.File) func(format string, args ...interface{}) {
|
||||
return func(format string, args ...interface{}) {
|
||||
writer := bufio.NewWriter(file)
|
||||
if _, err := fmt.Fprintf(writer, format, args...); err != nil {
|
||||
Logf("Failed to write file %v with test performance data: %v", file.Name(), err)
|
||||
}
|
||||
writer.Flush()
|
||||
}
|
||||
}
|
84
vendor/k8s.io/kubernetes/test/e2e/framework/get-kubemark-resource-usage.go
generated
vendored
Normal file
@ -0,0 +1,84 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type KubemarkResourceUsage struct {
|
||||
Name string
|
||||
MemoryWorkingSetInBytes uint64
|
||||
CPUUsageInCores float64
|
||||
}
|
||||
|
||||
func getMasterUsageByPrefix(prefix string) (string, error) {
|
||||
sshResult, err := SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), GetMasterHost()+":22", TestContext.Provider)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return sshResult.Stdout, nil
|
||||
}
|
||||
|
||||
// TODO: figure out how to move this to kubemark directory (need to factor test SSH out of e2e framework)
|
||||
func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsage {
|
||||
result := make(map[string]*KubemarkResourceUsage)
|
||||
// Get Kubernetes component resource usage
|
||||
sshResult, err := getMasterUsageByPrefix("kube")
|
||||
if err != nil {
|
||||
Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
|
||||
return nil
|
||||
}
|
||||
scanner := bufio.NewScanner(strings.NewReader(sshResult))
|
||||
for scanner.Scan() {
|
||||
var cpu float64
|
||||
var mem uint64
|
||||
var name string
|
||||
fmt.Sscanf(strings.TrimSpace(scanner.Text()), "%f %d /usr/local/bin/kube-%s", &cpu, &mem, &name)
|
||||
if name != "" {
|
||||
// Gatherer expects pod_name/container_name format
|
||||
fullName := name + "/" + name
|
||||
result[fullName] = &KubemarkResourceUsage{Name: fullName, MemoryWorkingSetInBytes: mem * 1024, CPUUsageInCores: cpu / 100}
|
||||
}
|
||||
}
|
||||
// Get etcd resource usage
|
||||
sshResult, err = getMasterUsageByPrefix("bin/etcd")
|
||||
if err != nil {
|
||||
Logf("Error when trying to SSH to master machine. Skipping probe")
|
||||
return nil
|
||||
}
|
||||
scanner = bufio.NewScanner(strings.NewReader(sshResult))
|
||||
for scanner.Scan() {
|
||||
var cpu float64
|
||||
var mem uint64
|
||||
var etcdKind string
|
||||
fmt.Sscanf(strings.TrimSpace(scanner.Text()), "%f %d /bin/sh -c /usr/local/bin/etcd", &cpu, &mem)
|
||||
dataDirStart := strings.Index(scanner.Text(), "--data-dir")
|
||||
if dataDirStart < 0 {
|
||||
continue
|
||||
}
|
||||
fmt.Sscanf(scanner.Text()[dataDirStart:], "--data-dir=/var/%s", &etcdKind)
|
||||
if etcdKind != "" {
|
||||
// Gatherer expects pod_name/container_name format
|
||||
fullName := "etcd/" + etcdKind
|
||||
result[fullName] = &KubemarkResourceUsage{Name: fullName, MemoryWorkingSetInBytes: mem * 1024, CPUUsageInCores: cpu / 100}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
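// Worked example (an assumption about typical input, not upstream code): a ps line such as
//	 2.6 123456 /usr/local/bin/kube-apiserver --allow-privileged=true ...
// is parsed by the Sscanf above into cpu=2.6, mem=123456 and name="apiserver", and reported
// under "apiserver/apiserver" as roughly 0.026 cores and 123456*1024 bytes of working set.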
26
vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper/BUILD
generated
vendored
Normal file
@ -0,0 +1,26 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["wrapper.go"],
    importpath = "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper",
    deps = ["//vendor/github.com/onsi/ginkgo:go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
134
vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper/wrapper.go
generated
vendored
Normal file
@ -0,0 +1,134 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package ginkgowrapper wraps Ginkgo Fail and Skip functions to panic
|
||||
// with structured data instead of a constant string.
|
||||
package ginkgowrapper
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
// FailurePanic is the value that will be panicked from Fail.
|
||||
type FailurePanic struct {
|
||||
Message string // The failure message passed to Fail
|
||||
Filename string // The filename that is the source of the failure
|
||||
Line int // The line number of the filename that is the source of the failure
|
||||
FullStackTrace string // A full stack trace starting at the source of the failure
|
||||
}
|
||||
|
||||
// String makes FailurePanic look like the old Ginkgo panic when printed.
|
||||
func (FailurePanic) String() string { return ginkgo.GINKGO_PANIC }
|
||||
|
||||
// Fail wraps ginkgo.Fail so that it panics with more useful
|
||||
// information about the failure. This function will panic with a
|
||||
// FailurePanic.
|
||||
func Fail(message string, callerSkip ...int) {
|
||||
skip := 1
|
||||
if len(callerSkip) > 0 {
|
||||
skip += callerSkip[0]
|
||||
}
|
||||
|
||||
_, file, line, _ := runtime.Caller(skip)
|
||||
fp := FailurePanic{
|
||||
Message: message,
|
||||
Filename: file,
|
||||
Line: line,
|
||||
FullStackTrace: pruneStack(skip),
|
||||
}
|
||||
|
||||
defer func() {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
panic(fp)
|
||||
}
|
||||
}()
|
||||
|
||||
ginkgo.Fail(message, skip)
|
||||
}
|
||||
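// Illustrative sketch (not part of the upstream file): because Fail panics with a FailurePanic,
// a caller that needs to intercept a failure can recover the structured value instead of a
// plain string (fmt is used here purely for illustration).
//
//	defer func() {
//		if r := recover(); r != nil {
//			if fp, ok := r.(FailurePanic); ok {
//				fmt.Printf("failed at %s:%d: %s\n", fp.Filename, fp.Line, fp.Message)
//			}
//		}
//	}()
//	Fail("something went wrong")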
|
||||
// SkipPanic is the value that will be panicked from Skip.
|
||||
type SkipPanic struct {
|
||||
Message string // The message passed to Skip
|
||||
Filename string // The filename that is the source of the failure
|
||||
Line int // The line number of the filename that is the source of the failure
|
||||
FullStackTrace string // A full stack trace starting at the source of the failure
|
||||
}
|
||||
|
||||
// String makes SkipPanic look like the old Ginkgo panic when printed.
|
||||
func (SkipPanic) String() string { return ginkgo.GINKGO_PANIC }
|
||||
|
||||
// Skip wraps ginkgo.Skip so that it panics with more useful
|
||||
// information about why the test is being skipped. This function will
|
||||
// panic with a SkipPanic.
|
||||
func Skip(message string, callerSkip ...int) {
|
||||
skip := 1
|
||||
if len(callerSkip) > 0 {
|
||||
skip += callerSkip[0]
|
||||
}
|
||||
|
||||
_, file, line, _ := runtime.Caller(skip)
|
||||
sp := SkipPanic{
|
||||
Message: message,
|
||||
Filename: file,
|
||||
Line: line,
|
||||
FullStackTrace: pruneStack(skip),
|
||||
}
|
||||
|
||||
defer func() {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
panic(sp)
|
||||
}
|
||||
}()
|
||||
|
||||
ginkgo.Skip(message, skip)
|
||||
}
|
||||
|
||||
// ginkgo adds a lot of test running infrastructure to the stack, so
|
||||
// we filter those out
|
||||
var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo`)
|
||||
|
||||
func pruneStack(skip int) string {
|
||||
skip += 2 // one for pruneStack and one for debug.Stack
|
||||
stack := debug.Stack()
|
||||
scanner := bufio.NewScanner(bytes.NewBuffer(stack))
|
||||
var prunedStack []string
|
||||
|
||||
// skip the top of the stack
|
||||
for i := 0; i < 2*skip+1; i++ {
|
||||
scanner.Scan()
|
||||
}
|
||||
|
||||
for scanner.Scan() {
|
||||
if stackSkipPattern.Match(scanner.Bytes()) {
|
||||
scanner.Scan() // these come in pairs
|
||||
} else {
|
||||
prunedStack = append(prunedStack, scanner.Text())
|
||||
scanner.Scan() // these come in pairs
|
||||
prunedStack = append(prunedStack, scanner.Text())
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(prunedStack, "\n")
|
||||
}
|
198
vendor/k8s.io/kubernetes/test/e2e/framework/google_compute.go
generated
vendored
Normal file
@ -0,0 +1,198 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
)
|
||||
|
||||
// TODO: These should really just use the GCE API client library or at least use
|
||||
// better formatted output from the --format flag.
|
||||
|
||||
func CreateGCEStaticIP(name string) (string, error) {
|
||||
// gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
|
||||
// abshah@abhidesk:~/go/src/code.google.com/p/google-api-go-client/compute/v1$ gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
|
||||
// Created [https://www.googleapis.com/compute/v1/projects/abshah-kubernetes-001/regions/us-central1/addresses/test-static-ip].
|
||||
// NAME REGION ADDRESS STATUS
|
||||
// test-static-ip us-central1 104.197.143.7 RESERVED
|
||||
|
||||
var outputBytes []byte
|
||||
var err error
|
||||
region, err := gce.GetGCERegion(TestContext.CloudConfig.Zone)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to convert zone to region: %v", err)
|
||||
}
|
||||
glog.Infof("Creating static IP with name %q in project %q in region %q", name, TestContext.CloudConfig.ProjectID, region)
|
||||
for attempts := 0; attempts < 4; attempts++ {
|
||||
outputBytes, err = exec.Command("gcloud", "compute", "addresses", "create",
|
||||
name, "--project", TestContext.CloudConfig.ProjectID,
|
||||
"--region", region, "-q", "--format=yaml").CombinedOutput()
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
glog.Errorf("output from failed attempt to create static IP: %s", outputBytes)
|
||||
time.Sleep(time.Duration(5*attempts) * time.Second)
|
||||
}
|
||||
if err != nil {
|
||||
// Ditch the error, since the stderr in the output is what actually contains
|
||||
// any useful info.
|
||||
return "", fmt.Errorf("failed to create static IP: %s", outputBytes)
|
||||
}
|
||||
output := string(outputBytes)
|
||||
if strings.Contains(output, "RESERVED") {
|
||||
r, _ := regexp.Compile("[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+")
|
||||
staticIP := r.FindString(output)
|
||||
if staticIP == "" {
|
||||
return "", fmt.Errorf("static IP not found in gcloud command output: %v", output)
|
||||
} else {
|
||||
return staticIP, nil
|
||||
}
|
||||
} else {
|
||||
return "", fmt.Errorf("static IP %q could not be reserved: %v", name, output)
|
||||
}
|
||||
}
|
||||
|
||||
func DeleteGCEStaticIP(name string) error {
|
||||
// gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
|
||||
// abshah@abhidesk:~/go/src/code.google.com/p/google-api-go-client/compute/v1$ gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
|
||||
// Created [https://www.googleapis.com/compute/v1/projects/abshah-kubernetes-001/regions/us-central1/addresses/test-static-ip].
|
||||
// NAME REGION ADDRESS STATUS
|
||||
// test-static-ip us-central1 104.197.143.7 RESERVED
|
||||
|
||||
region, err := gce.GetGCERegion(TestContext.CloudConfig.Zone)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to convert zone to region: %v", err)
|
||||
}
|
||||
glog.Infof("Deleting static IP with name %q in project %q in region %q", name, TestContext.CloudConfig.ProjectID, region)
|
||||
outputBytes, err := exec.Command("gcloud", "compute", "addresses", "delete",
|
||||
name, "--project", TestContext.CloudConfig.ProjectID,
|
||||
"--region", region, "-q").CombinedOutput()
|
||||
if err != nil {
|
||||
// Ditch the error, since the stderr in the output is what actually contains
|
||||
// any useful info.
|
||||
return fmt.Errorf("failed to delete static IP %q: %v", name, string(outputBytes))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Returns master & node image string, or error
|
||||
func lookupClusterImageSources() (string, string, error) {
|
||||
// Given args for a gcloud compute command, run it with other args, and return the values,
|
||||
// whether separated by newlines, commas or semicolons.
|
||||
gcloudf := func(argv ...string) ([]string, error) {
|
||||
args := []string{"compute"}
|
||||
args = append(args, argv...)
|
||||
args = append(args, "--project", TestContext.CloudConfig.ProjectID,
|
||||
"--zone", TestContext.CloudConfig.Zone)
|
||||
outputBytes, err := exec.Command("gcloud", args...).CombinedOutput()
|
||||
str := strings.Replace(string(outputBytes), ",", "\n", -1)
|
||||
str = strings.Replace(str, ";", "\n", -1)
|
||||
lines := strings.Split(str, "\n")
|
||||
if err != nil {
|
||||
Logf("lookupDiskImageSources: gcloud error with [%#v]; err:%v", argv, err)
|
||||
for _, l := range lines {
|
||||
Logf(" > %s", l)
|
||||
}
|
||||
}
|
||||
return lines, err
|
||||
}
|
||||
|
||||
// Given a GCE instance, look through its disks, finding one that has a sourceImage
|
||||
host2image := func(instance string) (string, error) {
|
||||
// gcloud compute instances describe {INSTANCE} --format="get(disks[].source)"
|
||||
// gcloud compute disks describe {DISKURL} --format="get(sourceImage)"
|
||||
disks, err := gcloudf("instances", "describe", instance, "--format=get(disks[].source)")
|
||||
if err != nil {
|
||||
return "", err
|
||||
} else if len(disks) == 0 {
|
||||
return "", fmt.Errorf("instance %q had no findable disks", instance)
|
||||
}
|
||||
// Loop over disks, looking for the boot disk
|
||||
for _, disk := range disks {
|
||||
lines, err := gcloudf("disks", "describe", disk, "--format=get(sourceImage)")
|
||||
if err != nil {
|
||||
return "", err
|
||||
} else if len(lines) > 0 && lines[0] != "" {
|
||||
return lines[0], nil // break, we're done
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("instance %q had no disk with a sourceImage", instance)
|
||||
}
|
||||
|
||||
// gcloud compute instance-groups list-instances {GROUPNAME} --format="get(instance)"
|
||||
nodeName := ""
|
||||
instGroupName := strings.Split(TestContext.CloudConfig.NodeInstanceGroup, ",")[0]
|
||||
if lines, err := gcloudf("instance-groups", "list-instances", instGroupName, "--format=get(instance)"); err != nil {
|
||||
return "", "", err
|
||||
} else if len(lines) == 0 {
|
||||
return "", "", fmt.Errorf("no instances inside instance-group %q", instGroupName)
|
||||
} else {
|
||||
nodeName = lines[0]
|
||||
}
|
||||
|
||||
nodeImg, err := host2image(nodeName)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
frags := strings.Split(nodeImg, "/")
|
||||
nodeImg = frags[len(frags)-1]
|
||||
|
||||
// For GKE clusters, MasterName will not be defined; we just leave masterImg blank.
|
||||
masterImg := ""
|
||||
if masterName := TestContext.CloudConfig.MasterName; masterName != "" {
|
||||
img, err := host2image(masterName)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
frags = strings.Split(img, "/")
|
||||
masterImg = frags[len(frags)-1]
|
||||
}
|
||||
|
||||
return masterImg, nodeImg, nil
|
||||
}
|
||||
|
||||
func LogClusterImageSources() {
|
||||
masterImg, nodeImg, err := lookupClusterImageSources()
|
||||
if err != nil {
|
||||
Logf("Cluster image sources lookup failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
Logf("cluster-master-image: %s", masterImg)
|
||||
Logf("cluster-node-image: %s", nodeImg)
|
||||
|
||||
images := map[string]string{
|
||||
"master_os_image": masterImg,
|
||||
"node_os_image": nodeImg,
|
||||
}
|
||||
|
||||
outputBytes, _ := json.MarshalIndent(images, "", " ")
|
||||
filePath := filepath.Join(TestContext.ReportDir, "images.json")
|
||||
if err := ioutil.WriteFile(filePath, outputBytes, 0644); err != nil {
|
||||
Logf("cluster images sources, could not write to %q: %v", filePath, err)
|
||||
}
|
||||
}
|
76
vendor/k8s.io/kubernetes/test/e2e/framework/gpu_util.go
generated
vendored
Normal file
@ -0,0 +1,76 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
// NVIDIAGPUResourceName is the extended name of the GPU resource since v1.8;
|
||||
// this uses the device plugin mechanism
|
||||
NVIDIAGPUResourceName = "nvidia.com/gpu"
|
||||
|
||||
// TODO: Parametrize it by making it a feature in TestFramework.
|
||||
// so we can override the daemonset in other setups (non COS).
|
||||
// GPUDevicePluginDSYAML is the official Google Device Plugin Daemonset NVIDIA GPU manifest for GKE
|
||||
GPUDevicePluginDSYAML = "https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml"
|
||||
)
|
||||
|
||||
// TODO make this generic and not linked to COS only
|
||||
// NumberOfNVIDIAGPUs returns the number of GPUs advertised by a node.
|
||||
// This is based on the Device Plugin system and expected to run on a COS based node
|
||||
// after the NVIDIA drivers have been installed.
|
||||
func NumberOfNVIDIAGPUs(node *v1.Node) int64 {
|
||||
val, ok := node.Status.Capacity[NVIDIAGPUResourceName]
|
||||
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
|
||||
return val.Value()
|
||||
}
|
||||
|
||||
// NVIDIADevicePlugin returns the official Google Device Plugin pod for NVIDIA GPU in GKE
|
||||
func NVIDIADevicePlugin(ns string) *v1.Pod {
|
||||
ds, err := DsFromManifest(GPUDevicePluginDSYAML)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
p := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "device-plugin-nvidia-gpu-" + string(uuid.NewUUID()),
|
||||
Namespace: ns,
|
||||
},
|
||||
|
||||
Spec: ds.Spec.Template.Spec,
|
||||
}
|
||||
// Remove node affinity
|
||||
p.Spec.Affinity = nil
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func GetGPUDevicePluginImage() string {
|
||||
ds, err := DsFromManifest(GPUDevicePluginDSYAML)
|
||||
if err != nil || ds == nil || len(ds.Spec.Template.Spec.Containers) < 1 {
|
||||
return ""
|
||||
}
|
||||
return ds.Spec.Template.Spec.Containers[0].Image
|
||||
}
|
1196
vendor/k8s.io/kubernetes/test/e2e/framework/ingress_utils.go
generated
vendored
Normal file
File diff suppressed because it is too large
259
vendor/k8s.io/kubernetes/test/e2e/framework/jobs_util.go
generated
vendored
Normal file
@ -0,0 +1,259 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
batch "k8s.io/api/batch/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
const (
|
||||
// How long to wait for a job to finish.
|
||||
JobTimeout = 15 * time.Minute
|
||||
|
||||
// Job selector name
|
||||
JobSelectorKey = "job"
|
||||
)
|
||||
|
||||
// NewTestJob returns a Job which does one of several testing behaviors. notTerminate starts a Job that will run
|
||||
// effectively forever. fail starts a Job that will fail immediately. succeed starts a Job that will succeed
|
||||
// immediately. randomlySucceedOrFail starts a Job that will succeed or fail randomly. failOnce fails the Job the
|
||||
// first time it is run and succeeds subsequently. name is the Name of the Job. RestartPolicy indicates the restart
|
||||
// policy of the containers in which the Pod is running. Parallelism is the Job's parallelism, and completions is the
|
||||
// Job's required number of completions.
|
||||
func NewTestJob(behavior, name string, rPol v1.RestartPolicy, parallelism, completions int32, activeDeadlineSeconds *int64, backoffLimit int32) *batch.Job {
|
||||
job := &batch.Job{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Job",
|
||||
},
|
||||
Spec: batch.JobSpec{
|
||||
ActiveDeadlineSeconds: activeDeadlineSeconds,
|
||||
Parallelism: ¶llelism,
|
||||
Completions: &completions,
|
||||
BackoffLimit: &backoffLimit,
|
||||
ManualSelector: newBool(false),
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{JobSelectorKey: name},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: rPol,
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "data",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "c",
|
||||
Image: BusyBoxImage,
|
||||
Command: []string{},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
MountPath: "/data",
|
||||
Name: "data",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
switch behavior {
|
||||
case "notTerminate":
|
||||
job.Spec.Template.Spec.Containers[0].Command = []string{"sleep", "1000000"}
|
||||
case "fail":
|
||||
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 1"}
|
||||
case "succeed":
|
||||
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 0"}
|
||||
case "randomlySucceedOrFail":
|
||||
// Bash's $RANDOM generates pseudorandom int in range 0 - 32767.
|
||||
// Dividing by 16384 gives roughly 50/50 chance of success.
|
||||
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit $(( $RANDOM / 16384 ))"}
|
||||
case "failOnce":
|
||||
// Fail the first time the container of the pod is run, and
|
||||
// succeed the second time. Checks for file on emptydir.
|
||||
// If present, succeed. If not, create but fail.
|
||||
// Note that this cannot be used with RestartNever because
|
||||
// it always fails the first time for a pod.
|
||||
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "if [[ -r /data/foo ]] ; then exit 0 ; else touch /data/foo ; exit 1 ; fi"}
|
||||
}
|
||||
return job
|
||||
}
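
// Illustrative usage sketch (not part of the upstream file): build a Job that
// succeeds immediately, submit it, and wait for it to finish. The namespace,
// job name, and backoff limit below are placeholder values.
func exampleRunSucceedingJob(c clientset.Interface, ns string) error {
	completions := int32(1)
	backoffLimit := int32(6) // placeholder; pick whatever the test needs
	job := NewTestJob("succeed", "example-job", v1.RestartPolicyNever, 1, completions, nil, backoffLimit)
	if _, err := CreateJob(c, ns, job); err != nil {
		return err
	}
	return WaitForJobFinish(c, ns, job.Name, completions)
}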
|
||||
|
||||
// GetJob uses c to get the Job in namespace ns named name. If the returned error is nil, the returned Job is valid.
|
||||
func GetJob(c clientset.Interface, ns, name string) (*batch.Job, error) {
|
||||
return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{})
|
||||
}
|
||||
|
||||
// CreateJob uses c to create job in namespace ns. If the returned error is nil, the returned Job is valid and has
|
||||
// been created.
|
||||
func CreateJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
|
||||
return c.BatchV1().Jobs(ns).Create(job)
|
||||
}
|
||||
|
||||
// UpdateJob uses c to updated job in namespace ns. If the returned error is nil, the returned Job is valid and has
|
||||
// been updated.
|
||||
func UpdateJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
|
||||
return c.BatchV1().Jobs(ns).Update(job)
|
||||
}
|
||||
|
||||
// UpdateJobFunc updates the job object. It retries if there is a conflict, and returns an error if
// there is any other error. name is the job name, updateFn is the function updating the
// job object.
|
||||
func UpdateJobFunc(c clientset.Interface, ns, name string, updateFn func(job *batch.Job)) {
|
||||
ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
|
||||
job, err := GetJob(c, ns, name)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to get pod %q: %v", name, err)
|
||||
}
|
||||
updateFn(job)
|
||||
_, err = UpdateJob(c, ns, job)
|
||||
if err == nil {
|
||||
Logf("Successfully updated job %q", name)
|
||||
return true, nil
|
||||
}
|
||||
if errors.IsConflict(err) {
|
||||
Logf("Conflicting update to job %q, re-get and re-update: %v", name, err)
|
||||
return false, nil
|
||||
}
|
||||
return false, fmt.Errorf("failed to update job %q: %v", name, err)
|
||||
}))
|
||||
}
|
||||
|
||||
// DeleteJob uses c to delete the Job named name in namespace ns. If the returned error is nil, the Job has been
|
||||
// deleted.
|
||||
func DeleteJob(c clientset.Interface, ns, name string) error {
|
||||
return c.BatchV1().Jobs(ns).Delete(name, nil)
|
||||
}
|
||||
|
||||
// GetJobPods returns a list of Pods belonging to a Job.
|
||||
func GetJobPods(c clientset.Interface, ns, jobName string) (*v1.PodList, error) {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{JobSelectorKey: jobName}))
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
return c.CoreV1().Pods(ns).List(options)
|
||||
}
|
||||
|
||||
// WaitForAllJobPodsRunning waits for all pods for the Job named jobName in namespace ns to become Running. Only use
// when pods will run for a long time, or it will be racy.
|
||||
func WaitForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error {
|
||||
return wait.Poll(Poll, JobTimeout, func() (bool, error) {
|
||||
pods, err := GetJobPods(c, ns, jobName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
count := int32(0)
|
||||
for _, p := range pods.Items {
|
||||
if p.Status.Phase == v1.PodRunning {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count == parallelism, nil
|
||||
})
|
||||
}
|
||||
|
||||
// WaitForJobFinish uses c to wait for the Job named jobName in namespace ns to reach the given number of completions.
|
||||
func WaitForJobFinish(c clientset.Interface, ns, jobName string, completions int32) error {
|
||||
return wait.Poll(Poll, JobTimeout, func() (bool, error) {
|
||||
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return curr.Status.Succeeded == completions, nil
|
||||
})
|
||||
}
|
||||
|
||||
// WaitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail.
|
||||
func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error {
|
||||
return wait.Poll(Poll, timeout, func() (bool, error) {
|
||||
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, c := range curr.Status.Conditions {
|
||||
if c.Type == batch.JobFailed && c.Status == v1.ConditionTrue {
|
||||
if reason == "" || reason == c.Reason {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
}
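
// Illustrative sketch (not part of the upstream file): wait for a Job to be
// marked Failed because its activeDeadlineSeconds elapsed. The reason string
// "DeadlineExceeded" is an assumption about what the job controller sets.
func exampleWaitForDeadlineExceeded(c clientset.Interface, ns, name string) error {
	return WaitForJobFailure(c, ns, name, JobTimeout, "DeadlineExceeded")
}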
|
||||
|
||||
// CheckForAllJobPodsRunning uses c to check whether the Job named jobName in ns is running, i.e. whether the number
// of Running pods equals parallelism. If the returned error is nil, the returned bool reports that check.
|
||||
func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) (bool, error) {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{JobSelectorKey: jobName}))
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.CoreV1().Pods(ns).List(options)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
count := int32(0)
|
||||
for _, p := range pods.Items {
|
||||
if p.Status.Phase == v1.PodRunning {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count == parallelism, nil
|
||||
}
|
||||
|
||||
func newBool(val bool) *bool {
|
||||
p := new(bool)
|
||||
*p = val
|
||||
return p
|
||||
}
|
||||
|
||||
type updateJobFunc func(*batch.Job)
|
||||
|
||||
func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateJobFunc) (job *batch.Job, err error) {
|
||||
jobs := c.BatchV1().Jobs(namespace)
|
||||
var updateErr error
|
||||
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
|
||||
if job, err = jobs.Get(name, metav1.GetOptions{}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Apply the update, then attempt to push it to the apiserver.
|
||||
applyUpdate(job)
|
||||
if job, err = jobs.Update(job); err == nil {
|
||||
Logf("Updating job %s", name)
|
||||
return true, nil
|
||||
}
|
||||
updateErr = err
|
||||
return false, nil
|
||||
})
|
||||
if pollErr == wait.ErrWaitTimeout {
|
||||
pollErr = fmt.Errorf("couldn't apply the provided updated to job %q: %v", name, updateErr)
|
||||
}
|
||||
return job, pollErr
|
||||
}
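
// Illustrative sketch (not part of the upstream file): UpdateJobWithRetries is
// handy for mutating a live Job while tolerating write conflicts, e.g. scaling
// its parallelism. All argument values come from the caller.
func exampleScaleJobParallelism(c clientset.Interface, ns, name string, parallelism int32) (*batch.Job, error) {
	return UpdateJobWithRetries(c, ns, name, func(j *batch.Job) {
		j.Spec.Parallelism = &parallelism
	})
}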
|
852
vendor/k8s.io/kubernetes/test/e2e/framework/kubelet_stats.go
generated
vendored
Normal file
@ -0,0 +1,852 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
dockermetrics "k8s.io/kubernetes/pkg/kubelet/dockershim/metrics"
|
||||
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
|
||||
"k8s.io/kubernetes/pkg/master/ports"
|
||||
"k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
// KubeletLatencyMetric stores a latency metric scraped from the kubelet server's /metrics endpoint.
|
||||
// TODO: Get some more structure around the metrics and this type
|
||||
type KubeletLatencyMetric struct {
|
||||
// eg: list, info, create
|
||||
Operation string
|
||||
// eg: sync_pods, pod_worker
|
||||
Method string
|
||||
// 0 <= quantile <=1, e.g. 0.95 is 95%tile, 0.5 is median.
|
||||
Quantile float64
|
||||
Latency time.Duration
|
||||
}
|
||||
|
||||
// KubeletLatencyMetrics implements sort.Interface for []KubeletLatencyMetric based on
// the latency field.
|
||||
type KubeletLatencyMetrics []KubeletLatencyMetric
|
||||
|
||||
func (a KubeletLatencyMetrics) Len() int { return len(a) }
|
||||
func (a KubeletLatencyMetrics) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a KubeletLatencyMetrics) Less(i, j int) bool { return a[i].Latency > a[j].Latency }
|
||||
|
||||
// If an apiserver client is passed in, the function tries to get kubelet metrics via the metrics grabber;
// otherwise, it gets the kubelet metrics directly from the node.
|
||||
func getKubeletMetricsFromNode(c clientset.Interface, nodeName string) (metrics.KubeletMetrics, error) {
|
||||
if c == nil {
|
||||
return metrics.GrabKubeletMetricsWithoutProxy(nodeName)
|
||||
}
|
||||
grabber, err := metrics.NewMetricsGrabber(c, nil, true, false, false, false, false)
|
||||
if err != nil {
|
||||
return metrics.KubeletMetrics{}, err
|
||||
}
|
||||
return grabber.GrabFromKubelet(nodeName)
|
||||
}
|
||||
|
||||
// getKubeletMetrics gets all metrics in kubelet subsystem from specified node and trims
|
||||
// the subsystem prefix.
|
||||
func getKubeletMetrics(c clientset.Interface, nodeName string) (metrics.KubeletMetrics, error) {
|
||||
ms, err := getKubeletMetricsFromNode(c, nodeName)
|
||||
if err != nil {
|
||||
return metrics.KubeletMetrics{}, err
|
||||
}
|
||||
|
||||
kubeletMetrics := make(metrics.KubeletMetrics)
|
||||
for name, samples := range ms {
|
||||
const prefix = kubeletmetrics.KubeletSubsystem + "_"
|
||||
if !strings.HasPrefix(name, prefix) {
|
||||
// Not a kubelet metric.
|
||||
continue
|
||||
}
|
||||
method := strings.TrimPrefix(name, prefix)
|
||||
kubeletMetrics[method] = samples
|
||||
}
|
||||
return kubeletMetrics, nil
|
||||
}
|
||||
|
||||
// GetKubeletLatencyMetrics gets all latency related kubelet metrics. Note that the KubeletMetrics
|
||||
// passed in should not contain subsystem prefix.
|
||||
func GetKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics {
|
||||
latencyMethods := sets.NewString(
|
||||
kubeletmetrics.PodWorkerLatencyKey,
|
||||
kubeletmetrics.PodWorkerStartLatencyKey,
|
||||
kubeletmetrics.PodStartLatencyKey,
|
||||
kubeletmetrics.CgroupManagerOperationsKey,
|
||||
dockermetrics.DockerOperationsLatencyKey,
|
||||
kubeletmetrics.PodWorkerStartLatencyKey,
|
||||
kubeletmetrics.PLEGRelistLatencyKey,
|
||||
)
|
||||
return GetKubeletMetrics(ms, latencyMethods)
|
||||
}
|
||||
|
||||
func GetKubeletMetrics(ms metrics.KubeletMetrics, methods sets.String) KubeletLatencyMetrics {
|
||||
var latencyMetrics KubeletLatencyMetrics
|
||||
for method, samples := range ms {
|
||||
if !methods.Has(method) {
|
||||
continue
|
||||
}
|
||||
for _, sample := range samples {
|
||||
latency := sample.Value
|
||||
operation := string(sample.Metric["operation_type"])
|
||||
var quantile float64
|
||||
if val, ok := sample.Metric[model.QuantileLabel]; ok {
|
||||
var err error
|
||||
if quantile, err = strconv.ParseFloat(string(val), 64); err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
latencyMetrics = append(latencyMetrics, KubeletLatencyMetric{
|
||||
Operation: operation,
|
||||
Method: method,
|
||||
Quantile: quantile,
|
||||
Latency: time.Duration(int64(latency)) * time.Microsecond,
|
||||
})
|
||||
}
|
||||
}
|
||||
return latencyMetrics
|
||||
}
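
// Illustrative sketch (not part of the upstream file): GetKubeletMetrics can
// also be used with a custom method set, here only the pod start latency key
// that is already referenced above.
func exampleGetPodStartLatencies(ms metrics.KubeletMetrics) KubeletLatencyMetrics {
	return GetKubeletMetrics(ms, sets.NewString(kubeletmetrics.PodStartLatencyKey))
}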
|
||||
|
||||
// RuntimeOperationMonitor is the tool getting and parsing docker operation metrics.
|
||||
type RuntimeOperationMonitor struct {
|
||||
client clientset.Interface
|
||||
nodesRuntimeOps map[string]NodeRuntimeOperationErrorRate
|
||||
}
|
||||
|
||||
// NodeRuntimeOperationErrorRate is the runtime operation error rate on one node.
|
||||
type NodeRuntimeOperationErrorRate map[string]*RuntimeOperationErrorRate
|
||||
|
||||
// RuntimeOperationErrorRate is the error rate of a specified runtime operation.
|
||||
type RuntimeOperationErrorRate struct {
|
||||
TotalNumber float64
|
||||
ErrorRate float64
|
||||
TimeoutRate float64
|
||||
}
|
||||
|
||||
func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor {
|
||||
m := &RuntimeOperationMonitor{
|
||||
client: c,
|
||||
nodesRuntimeOps: make(map[string]NodeRuntimeOperationErrorRate),
|
||||
}
|
||||
nodes, err := m.client.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Failf("RuntimeOperationMonitor: unable to get list of nodes: %v", err)
|
||||
}
|
||||
for _, node := range nodes.Items {
|
||||
m.nodesRuntimeOps[node.Name] = make(NodeRuntimeOperationErrorRate)
|
||||
}
|
||||
// Initialize the runtime operation error rate
|
||||
m.GetRuntimeOperationErrorRate()
|
||||
return m
|
||||
}
|
||||
|
||||
// GetRuntimeOperationErrorRate gets runtime operation records from kubelet metrics and calculates
|
||||
// error rates of all runtime operations.
|
||||
func (m *RuntimeOperationMonitor) GetRuntimeOperationErrorRate() map[string]NodeRuntimeOperationErrorRate {
|
||||
for node := range m.nodesRuntimeOps {
|
||||
nodeResult, err := getNodeRuntimeOperationErrorRate(m.client, node)
|
||||
if err != nil {
|
||||
Logf("GetRuntimeOperationErrorRate: unable to get kubelet metrics from node %q: %v", node, err)
|
||||
continue
|
||||
}
|
||||
m.nodesRuntimeOps[node] = nodeResult
|
||||
}
|
||||
return m.nodesRuntimeOps
|
||||
}
|
||||
|
||||
// GetLatestRuntimeOperationErrorRate gets latest error rate and timeout rate from last observed RuntimeOperationErrorRate.
|
||||
func (m *RuntimeOperationMonitor) GetLatestRuntimeOperationErrorRate() map[string]NodeRuntimeOperationErrorRate {
|
||||
result := make(map[string]NodeRuntimeOperationErrorRate)
|
||||
for node := range m.nodesRuntimeOps {
|
||||
result[node] = make(NodeRuntimeOperationErrorRate)
|
||||
oldNodeResult := m.nodesRuntimeOps[node]
|
||||
curNodeResult, err := getNodeRuntimeOperationErrorRate(m.client, node)
|
||||
if err != nil {
|
||||
Logf("GetLatestRuntimeOperationErrorRate: unable to get kubelet metrics from node %q: %v", node, err)
|
||||
continue
|
||||
}
|
||||
for op, cur := range curNodeResult {
|
||||
t := *cur
|
||||
if old, found := oldNodeResult[op]; found {
|
||||
t.ErrorRate = (t.ErrorRate*t.TotalNumber - old.ErrorRate*old.TotalNumber) / (t.TotalNumber - old.TotalNumber)
|
||||
t.TimeoutRate = (t.TimeoutRate*t.TotalNumber - old.TimeoutRate*old.TotalNumber) / (t.TotalNumber - old.TotalNumber)
|
||||
t.TotalNumber -= old.TotalNumber
|
||||
}
|
||||
result[node][op] = &t
|
||||
}
|
||||
m.nodesRuntimeOps[node] = curNodeResult
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// FormatRuntimeOperationErrorRate formats the runtime operation error rate to string.
|
||||
func FormatRuntimeOperationErrorRate(nodesResult map[string]NodeRuntimeOperationErrorRate) string {
|
||||
lines := []string{}
|
||||
for node, nodeResult := range nodesResult {
|
||||
lines = append(lines, fmt.Sprintf("node %q runtime operation error rate:", node))
|
||||
for op, result := range nodeResult {
|
||||
line := fmt.Sprintf("operation %q: total - %.0f; error rate - %f; timeout rate - %f", op,
|
||||
result.TotalNumber, result.ErrorRate, result.TimeoutRate)
|
||||
lines = append(lines, line)
|
||||
}
|
||||
lines = append(lines, fmt.Sprintln())
|
||||
}
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
// getNodeRuntimeOperationErrorRate gets runtime operation error rate from specified node.
|
||||
func getNodeRuntimeOperationErrorRate(c clientset.Interface, node string) (NodeRuntimeOperationErrorRate, error) {
|
||||
result := make(NodeRuntimeOperationErrorRate)
|
||||
ms, err := getKubeletMetrics(c, node)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
// If no corresponding metrics are found, the returned samples will be empty. Then the following
|
||||
// loop will be skipped automatically.
|
||||
allOps := ms[dockermetrics.DockerOperationsKey]
|
||||
errOps := ms[dockermetrics.DockerOperationsErrorsKey]
|
||||
timeoutOps := ms[dockermetrics.DockerOperationsTimeoutKey]
|
||||
for _, sample := range allOps {
|
||||
operation := string(sample.Metric["operation_type"])
|
||||
result[operation] = &RuntimeOperationErrorRate{TotalNumber: float64(sample.Value)}
|
||||
}
|
||||
for _, sample := range errOps {
|
||||
operation := string(sample.Metric["operation_type"])
|
||||
// We should always find the corresponding item; the check is just in case.
|
||||
if _, found := result[operation]; found {
|
||||
result[operation].ErrorRate = float64(sample.Value) / result[operation].TotalNumber
|
||||
}
|
||||
}
|
||||
for _, sample := range timeoutOps {
|
||||
operation := string(sample.Metric["operation_type"])
|
||||
if _, found := result[operation]; found {
|
||||
result[operation].TimeoutRate = float64(sample.Value) / result[operation].TotalNumber
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// HighLatencyKubeletOperations logs and counts the high latency metrics exported by the kubelet server via /metrics.
|
||||
func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration, nodeName string, logFunc func(fmt string, args ...interface{})) (KubeletLatencyMetrics, error) {
|
||||
ms, err := getKubeletMetrics(c, nodeName)
|
||||
if err != nil {
|
||||
return KubeletLatencyMetrics{}, err
|
||||
}
|
||||
latencyMetrics := GetKubeletLatencyMetrics(ms)
|
||||
sort.Sort(latencyMetrics)
|
||||
var badMetrics KubeletLatencyMetrics
|
||||
logFunc("\nLatency metrics for node %v", nodeName)
|
||||
for _, m := range latencyMetrics {
|
||||
if m.Latency > threshold {
|
||||
badMetrics = append(badMetrics, m)
|
||||
Logf("%+v", m)
|
||||
}
|
||||
}
|
||||
return badMetrics, nil
|
||||
}
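
// Illustrative sketch (not part of the upstream file): log every kubelet
// operation on a node that exceeded a 10s latency threshold, reusing the
// framework's Logf as the log function.
func exampleLogSlowKubeletOps(c clientset.Interface, nodeName string) error {
	_, err := HighLatencyKubeletOperations(c, 10*time.Second, nodeName, Logf)
	return err
}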
|
||||
|
||||
// getStatsSummary contacts kubelet for the container information.
|
||||
func getStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
|
||||
defer cancel()
|
||||
|
||||
data, err := c.CoreV1().RESTClient().Get().
|
||||
Context(ctx).
|
||||
Resource("nodes").
|
||||
SubResource("proxy").
|
||||
Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
|
||||
Suffix("stats/summary").
|
||||
Do().Raw()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
summary := stats.Summary{}
|
||||
err = json.Unmarshal(data, &summary)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &summary, nil
|
||||
}
|
||||
|
||||
func removeUint64Ptr(ptr *uint64) uint64 {
|
||||
if ptr == nil {
|
||||
return 0
|
||||
}
|
||||
return *ptr
|
||||
}
|
||||
|
||||
// getOneTimeResourceUsageOnNode queries the node's /stats/summary endpoint
|
||||
// and returns the resource usage of all containerNames for the past
|
||||
// cpuInterval.
|
||||
// The acceptable range of the interval is 2s~120s. Be warned that as the
|
||||
// interval (and #containers) increases, the size of kubelet's response
|
||||
// could be significant. E.g., the 60s interval stats for ~20 containers is
|
||||
// ~1.5MB. Don't hammer the node with frequent, heavy requests.
|
||||
//
|
||||
// cadvisor records cumulative cpu usage in nanoseconds, so we need to have two
|
||||
// stats points to compute the cpu usage over the interval. Assuming cadvisor
|
||||
// polls every second, we'd need to get N stats points for N-second interval.
|
||||
// Note that this is an approximation and may not be accurate, hence we also
|
||||
// write the actual interval used for calculation (based on the timestamps of
|
||||
// the stats points) in ContainerResourceUsage.CPUInterval.
|
||||
//
|
||||
// containerNames is a function returning a collection of container names that the
// user is interested in.
|
||||
func getOneTimeResourceUsageOnNode(
|
||||
c clientset.Interface,
|
||||
nodeName string,
|
||||
cpuInterval time.Duration,
|
||||
containerNames func() []string,
|
||||
) (ResourceUsagePerContainer, error) {
|
||||
const (
|
||||
// cadvisor records stats about every second.
|
||||
cadvisorStatsPollingIntervalInSeconds float64 = 1.0
|
||||
// cadvisor caches up to 2 minutes of stats (configured by kubelet).
|
||||
maxNumStatsToRequest int = 120
|
||||
)
|
||||
|
||||
numStats := int(float64(cpuInterval.Seconds()) / cadvisorStatsPollingIntervalInSeconds)
|
||||
if numStats < 2 || numStats > maxNumStatsToRequest {
|
||||
return nil, fmt.Errorf("numStats needs to be > 1 and < %d", maxNumStatsToRequest)
|
||||
}
|
||||
// Get information of all containers on the node.
|
||||
summary, err := getStatsSummary(c, nodeName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f := func(name string, newStats *stats.ContainerStats) *ContainerResourceUsage {
|
||||
if newStats == nil || newStats.CPU == nil || newStats.Memory == nil {
|
||||
return nil
|
||||
}
|
||||
return &ContainerResourceUsage{
|
||||
Name: name,
|
||||
Timestamp: newStats.StartTime.Time,
|
||||
CPUUsageInCores: float64(removeUint64Ptr(newStats.CPU.UsageNanoCores)) / 1000000000,
|
||||
MemoryUsageInBytes: removeUint64Ptr(newStats.Memory.UsageBytes),
|
||||
MemoryWorkingSetInBytes: removeUint64Ptr(newStats.Memory.WorkingSetBytes),
|
||||
MemoryRSSInBytes: removeUint64Ptr(newStats.Memory.RSSBytes),
|
||||
CPUInterval: 0,
|
||||
}
|
||||
}
|
||||
// Process container infos that are relevant to us.
|
||||
containers := containerNames()
|
||||
usageMap := make(ResourceUsagePerContainer, len(containers))
|
||||
observedContainers := []string{}
|
||||
for _, pod := range summary.Pods {
|
||||
for _, container := range pod.Containers {
|
||||
isInteresting := false
|
||||
for _, interestingContainerName := range containers {
|
||||
if container.Name == interestingContainerName {
|
||||
isInteresting = true
|
||||
observedContainers = append(observedContainers, container.Name)
|
||||
break
|
||||
}
|
||||
}
|
||||
if !isInteresting {
|
||||
continue
|
||||
}
|
||||
if usage := f(pod.PodRef.Name+"/"+container.Name, &container); usage != nil {
|
||||
usageMap[pod.PodRef.Name+"/"+container.Name] = usage
|
||||
}
|
||||
}
|
||||
}
|
||||
return usageMap, nil
|
||||
}
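
// Illustrative sketch (not part of the upstream file): a one-shot sample of the
// system containers' usage on a node, using a 30s interval that respects the
// documented 2s-120s bound above.
func exampleOneTimeUsage(c clientset.Interface, nodeName string) (ResourceUsagePerContainer, error) {
	return getOneTimeResourceUsageOnNode(c, nodeName, 30*time.Second, TargetContainers)
}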
|
||||
|
||||
func getNodeStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, error) {
|
||||
data, err := c.CoreV1().RESTClient().Get().
|
||||
Resource("nodes").
|
||||
SubResource("proxy").
|
||||
Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
|
||||
Suffix("stats/summary").
|
||||
SetHeader("Content-Type", "application/json").
|
||||
Do().Raw()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var summary *stats.Summary
|
||||
err = json.Unmarshal(data, &summary)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return summary, nil
|
||||
}
|
||||
|
||||
func getSystemContainerStats(summary *stats.Summary) map[string]*stats.ContainerStats {
|
||||
statsList := summary.Node.SystemContainers
|
||||
statsMap := make(map[string]*stats.ContainerStats)
|
||||
for i := range statsList {
|
||||
statsMap[statsList[i].Name] = &statsList[i]
|
||||
}
|
||||
|
||||
// Create a root container stats using information available in
|
||||
// stats.NodeStats. This is necessary since it is a different type.
|
||||
statsMap[rootContainerName] = &stats.ContainerStats{
|
||||
CPU: summary.Node.CPU,
|
||||
Memory: summary.Node.Memory,
|
||||
}
|
||||
return statsMap
|
||||
}
|
||||
|
||||
const (
|
||||
rootContainerName = "/"
|
||||
)
|
||||
|
||||
// TargetContainers returns a list of containers for which we want to collect resource usage.
|
||||
func TargetContainers() []string {
|
||||
return []string{
|
||||
rootContainerName,
|
||||
stats.SystemContainerRuntime,
|
||||
stats.SystemContainerKubelet,
|
||||
}
|
||||
}
|
||||
|
||||
type ContainerResourceUsage struct {
|
||||
Name string
|
||||
Timestamp time.Time
|
||||
CPUUsageInCores float64
|
||||
MemoryUsageInBytes uint64
|
||||
MemoryWorkingSetInBytes uint64
|
||||
MemoryRSSInBytes uint64
|
||||
// The interval used to calculate CPUUsageInCores.
|
||||
CPUInterval time.Duration
|
||||
}
|
||||
|
||||
func (r *ContainerResourceUsage) isStrictlyGreaterThan(rhs *ContainerResourceUsage) bool {
|
||||
return r.CPUUsageInCores > rhs.CPUUsageInCores && r.MemoryWorkingSetInBytes > rhs.MemoryWorkingSetInBytes
|
||||
}
|
||||
|
||||
type ResourceUsagePerContainer map[string]*ContainerResourceUsage
|
||||
type ResourceUsagePerNode map[string]ResourceUsagePerContainer
|
||||
|
||||
func formatResourceUsageStats(nodeName string, containerStats ResourceUsagePerContainer) string {
|
||||
// Example output:
|
||||
//
|
||||
// Resource usage for node "e2e-test-foo-node-abcde":
|
||||
// container cpu(cores) memory(MB)
|
||||
// "/" 0.363 2942.09
|
||||
// "/docker-daemon" 0.088 521.80
|
||||
// "/kubelet" 0.086 424.37
|
||||
// "/system" 0.007 119.88
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
|
||||
fmt.Fprintf(w, "container\tcpu(cores)\tmemory_working_set(MB)\tmemory_rss(MB)\n")
|
||||
for name, s := range containerStats {
|
||||
fmt.Fprintf(w, "%q\t%.3f\t%.2f\t%.2f\n", name, s.CPUUsageInCores, float64(s.MemoryWorkingSetInBytes)/(1024*1024), float64(s.MemoryRSSInBytes)/(1024*1024))
|
||||
}
|
||||
w.Flush()
|
||||
return fmt.Sprintf("Resource usage on node %q:\n%s", nodeName, buf.String())
|
||||
}
|
||||
|
||||
type uint64arr []uint64
|
||||
|
||||
func (a uint64arr) Len() int { return len(a) }
|
||||
func (a uint64arr) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a uint64arr) Less(i, j int) bool { return a[i] < a[j] }
|
||||
|
||||
type usageDataPerContainer struct {
|
||||
cpuData []float64
|
||||
memUseData []uint64
|
||||
memWorkSetData []uint64
|
||||
}
|
||||
|
||||
func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error) {
|
||||
client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
raw, errRaw := client.Raw()
|
||||
if errRaw != nil {
|
||||
return "", err
|
||||
}
|
||||
stats := string(raw)
|
||||
// Only dumping the runtime.MemStats numbers to avoid polluting the log.
|
||||
numLines := 23
|
||||
lines := strings.Split(stats, "\n")
|
||||
return strings.Join(lines[len(lines)-numLines:], "\n"), nil
|
||||
}
|
||||
|
||||
func PrintAllKubeletPods(c clientset.Interface, nodeName string) {
|
||||
podList, err := GetKubeletPods(c, nodeName)
|
||||
if err != nil {
|
||||
Logf("Unable to retrieve kubelet pods for node %v: %v", nodeName, err)
|
||||
return
|
||||
}
|
||||
for _, p := range podList.Items {
|
||||
Logf("%v from %v started at %v (%d container statuses recorded)", p.Name, p.Namespace, p.Status.StartTime, len(p.Status.ContainerStatuses))
|
||||
for _, c := range p.Status.ContainerStatuses {
|
||||
Logf("\tContainer %v ready: %v, restart count %v",
|
||||
c.Name, c.Ready, c.RestartCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func computeContainerResourceUsage(name string, oldStats, newStats *stats.ContainerStats) *ContainerResourceUsage {
|
||||
return &ContainerResourceUsage{
|
||||
Name: name,
|
||||
Timestamp: newStats.CPU.Time.Time,
|
||||
CPUUsageInCores: float64(*newStats.CPU.UsageCoreNanoSeconds-*oldStats.CPU.UsageCoreNanoSeconds) / float64(newStats.CPU.Time.Time.Sub(oldStats.CPU.Time.Time).Nanoseconds()),
|
||||
MemoryUsageInBytes: *newStats.Memory.UsageBytes,
|
||||
MemoryWorkingSetInBytes: *newStats.Memory.WorkingSetBytes,
|
||||
MemoryRSSInBytes: *newStats.Memory.RSSBytes,
|
||||
CPUInterval: newStats.CPU.Time.Time.Sub(oldStats.CPU.Time.Time),
|
||||
}
|
||||
}
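
// Worked example (illustrative, not from the upstream file): if
// UsageCoreNanoSeconds grows from 4.0e9 to 5.5e9 over a 10s window (1.0e10 ns
// between the two CPU timestamps), CPUUsageInCores = 1.5e9 / 1.0e10 = 0.15
// cores on average for that interval.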
|
||||
|
||||
// resourceCollector periodically polls the node, collects stats for a given
// list of containers, and computes and caches resource usage up to
// maxEntriesPerContainer for each container.
|
||||
type resourceCollector struct {
|
||||
lock sync.RWMutex
|
||||
node string
|
||||
containers []string
|
||||
client clientset.Interface
|
||||
buffers map[string][]*ContainerResourceUsage
|
||||
pollingInterval time.Duration
|
||||
stopCh chan struct{}
|
||||
}
|
||||
|
||||
func newResourceCollector(c clientset.Interface, nodeName string, containerNames []string, pollingInterval time.Duration) *resourceCollector {
|
||||
buffers := make(map[string][]*ContainerResourceUsage)
|
||||
return &resourceCollector{
|
||||
node: nodeName,
|
||||
containers: containerNames,
|
||||
client: c,
|
||||
buffers: buffers,
|
||||
pollingInterval: pollingInterval,
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts a goroutine to poll the node every pollingInterval.
|
||||
func (r *resourceCollector) Start() {
|
||||
r.stopCh = make(chan struct{}, 1)
|
||||
// Keep the last observed stats for comparison.
|
||||
oldStats := make(map[string]*stats.ContainerStats)
|
||||
go wait.Until(func() { r.collectStats(oldStats) }, r.pollingInterval, r.stopCh)
|
||||
}
|
||||
|
||||
// Stop sends a signal to terminate the stats collecting goroutine.
|
||||
func (r *resourceCollector) Stop() {
|
||||
close(r.stopCh)
|
||||
}
|
||||
|
||||
// collectStats gets the latest stats from kubelet stats summary API, computes
|
||||
// the resource usage, and pushes it to the buffer.
|
||||
func (r *resourceCollector) collectStats(oldStatsMap map[string]*stats.ContainerStats) {
|
||||
summary, err := getNodeStatsSummary(r.client, r.node)
|
||||
if err != nil {
|
||||
Logf("Error getting node stats summary on %q, err: %v", r.node, err)
|
||||
return
|
||||
}
|
||||
cStatsMap := getSystemContainerStats(summary)
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
for _, name := range r.containers {
|
||||
cStats, ok := cStatsMap[name]
|
||||
if !ok {
|
||||
Logf("Missing info/stats for container %q on node %q", name, r.node)
|
||||
return
|
||||
}
|
||||
|
||||
if oldStats, ok := oldStatsMap[name]; ok {
|
||||
if oldStats.CPU.Time.Equal(&cStats.CPU.Time) {
|
||||
// No change -> skip this stat.
|
||||
continue
|
||||
}
|
||||
r.buffers[name] = append(r.buffers[name], computeContainerResourceUsage(name, oldStats, cStats))
|
||||
}
|
||||
// Update the old stats.
|
||||
oldStatsMap[name] = cStats
|
||||
}
|
||||
}
|
||||
|
||||
func (r *resourceCollector) GetLatest() (ResourceUsagePerContainer, error) {
|
||||
r.lock.RLock()
|
||||
defer r.lock.RUnlock()
|
||||
stats := make(ResourceUsagePerContainer)
|
||||
for _, name := range r.containers {
|
||||
contStats, ok := r.buffers[name]
|
||||
if !ok || len(contStats) == 0 {
|
||||
return nil, fmt.Errorf("Resource usage on node %q is not ready yet", r.node)
|
||||
}
|
||||
stats[name] = contStats[len(contStats)-1]
|
||||
}
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
// Reset frees the stats and starts over.
|
||||
func (r *resourceCollector) Reset() {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
for _, name := range r.containers {
|
||||
r.buffers[name] = []*ContainerResourceUsage{}
|
||||
}
|
||||
}
|
||||
|
||||
type resourceUsageByCPU []*ContainerResourceUsage
|
||||
|
||||
func (r resourceUsageByCPU) Len() int { return len(r) }
|
||||
func (r resourceUsageByCPU) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
|
||||
func (r resourceUsageByCPU) Less(i, j int) bool { return r[i].CPUUsageInCores < r[j].CPUUsageInCores }
|
||||
|
||||
// The percentiles to report.
|
||||
var percentiles = [...]float64{0.05, 0.20, 0.50, 0.70, 0.90, 0.95, 0.99}
|
||||
|
||||
// GetBasicCPUStats returns the percentiles of the cpu usage in cores for
|
||||
// containerName. This method examines all data currently in the buffer.
|
||||
func (r *resourceCollector) GetBasicCPUStats(containerName string) map[float64]float64 {
|
||||
r.lock.RLock()
|
||||
defer r.lock.RUnlock()
|
||||
result := make(map[float64]float64, len(percentiles))
|
||||
usages := r.buffers[containerName]
|
||||
sort.Sort(resourceUsageByCPU(usages))
|
||||
for _, q := range percentiles {
|
||||
index := int(float64(len(usages))*q) - 1
|
||||
if index < 0 {
|
||||
// We don't have enough data.
|
||||
result[q] = 0
|
||||
continue
|
||||
}
|
||||
result[q] = usages[index].CPUUsageInCores
|
||||
}
|
||||
return result
|
||||
}
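
// Worked example (illustrative, not from the upstream file): with 100 buffered
// samples and q = 0.95, index = int(100*0.95) - 1 = 94, i.e. the 95th smallest
// CPU usage value is reported as the 95th percentile.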
|
||||
|
||||
// ResourceMonitor manages a resourceCollector per node.
|
||||
type ResourceMonitor struct {
|
||||
client clientset.Interface
|
||||
containers []string
|
||||
pollingInterval time.Duration
|
||||
collectors map[string]*resourceCollector
|
||||
}
|
||||
|
||||
func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingInterval time.Duration) *ResourceMonitor {
|
||||
return &ResourceMonitor{
|
||||
containers: containerNames,
|
||||
client: c,
|
||||
pollingInterval: pollingInterval,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) Start() {
|
||||
// It should be OK to monitor unschedulable Nodes
|
||||
nodes, err := r.client.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Failf("ResourceMonitor: unable to get list of nodes: %v", err)
|
||||
}
|
||||
r.collectors = make(map[string]*resourceCollector, 0)
|
||||
for _, node := range nodes.Items {
|
||||
collector := newResourceCollector(r.client, node.Name, r.containers, r.pollingInterval)
|
||||
r.collectors[node.Name] = collector
|
||||
collector.Start()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) Stop() {
|
||||
for _, collector := range r.collectors {
|
||||
collector.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) Reset() {
|
||||
for _, collector := range r.collectors {
|
||||
collector.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) LogLatest() {
|
||||
summary, err := r.GetLatest()
|
||||
if err != nil {
|
||||
Logf("%v", err)
|
||||
}
|
||||
Logf("%s", r.FormatResourceUsage(summary))
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) FormatResourceUsage(s ResourceUsagePerNode) string {
|
||||
summary := []string{}
|
||||
for node, usage := range s {
|
||||
summary = append(summary, formatResourceUsageStats(node, usage))
|
||||
}
|
||||
return strings.Join(summary, "\n")
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) GetLatest() (ResourceUsagePerNode, error) {
|
||||
result := make(ResourceUsagePerNode)
|
||||
errs := []error{}
|
||||
for key, collector := range r.collectors {
|
||||
s, err := collector.GetLatest()
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
result[key] = s
|
||||
}
|
||||
return result, utilerrors.NewAggregate(errs)
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) GetMasterNodeLatest(usagePerNode ResourceUsagePerNode) ResourceUsagePerNode {
|
||||
result := make(ResourceUsagePerNode)
|
||||
var masterUsage ResourceUsagePerContainer
|
||||
var nodesUsage []ResourceUsagePerContainer
|
||||
for node, usage := range usagePerNode {
|
||||
if strings.HasSuffix(node, "master") {
|
||||
masterUsage = usage
|
||||
} else {
|
||||
nodesUsage = append(nodesUsage, usage)
|
||||
}
|
||||
}
|
||||
nodeAvgUsage := make(ResourceUsagePerContainer)
|
||||
for _, nodeUsage := range nodesUsage {
|
||||
for c, usage := range nodeUsage {
|
||||
if _, found := nodeAvgUsage[c]; !found {
|
||||
nodeAvgUsage[c] = &ContainerResourceUsage{Name: usage.Name}
|
||||
}
|
||||
nodeAvgUsage[c].CPUUsageInCores += usage.CPUUsageInCores
|
||||
nodeAvgUsage[c].MemoryUsageInBytes += usage.MemoryUsageInBytes
|
||||
nodeAvgUsage[c].MemoryWorkingSetInBytes += usage.MemoryWorkingSetInBytes
|
||||
nodeAvgUsage[c].MemoryRSSInBytes += usage.MemoryRSSInBytes
|
||||
}
|
||||
}
|
||||
for c := range nodeAvgUsage {
|
||||
nodeAvgUsage[c].CPUUsageInCores /= float64(len(nodesUsage))
|
||||
nodeAvgUsage[c].MemoryUsageInBytes /= uint64(len(nodesUsage))
|
||||
nodeAvgUsage[c].MemoryWorkingSetInBytes /= uint64(len(nodesUsage))
|
||||
nodeAvgUsage[c].MemoryRSSInBytes /= uint64(len(nodesUsage))
|
||||
}
|
||||
result["master"] = masterUsage
|
||||
result["node"] = nodeAvgUsage
|
||||
return result
|
||||
}
|
||||
|
||||
// ContainersCPUSummary is indexed by the container name with each entry a
|
||||
// (percentile, value) map.
|
||||
type ContainersCPUSummary map[string]map[float64]float64
|
||||
|
||||
// NodesCPUSummary is indexed by the node name with each entry a
|
||||
// ContainersCPUSummary map.
|
||||
type NodesCPUSummary map[string]ContainersCPUSummary
|
||||
|
||||
func (r *ResourceMonitor) FormatCPUSummary(summary NodesCPUSummary) string {
|
||||
// Example output for a node (the percentiles may differ):
|
||||
// CPU usage of containers on node "e2e-test-foo-node-0vj7":
|
||||
// container 5th% 50th% 90th% 95th%
|
||||
// "/" 0.051 0.159 0.387 0.455
|
||||
// "/runtime 0.000 0.000 0.146 0.166
|
||||
// "/kubelet" 0.036 0.053 0.091 0.154
|
||||
// "/misc" 0.001 0.001 0.001 0.002
|
||||
var summaryStrings []string
|
||||
var header []string
|
||||
header = append(header, "container")
|
||||
for _, p := range percentiles {
|
||||
header = append(header, fmt.Sprintf("%.0fth%%", p*100))
|
||||
}
|
||||
for nodeName, containers := range summary {
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
|
||||
fmt.Fprintf(w, "%s\n", strings.Join(header, "\t"))
|
||||
for _, containerName := range TargetContainers() {
|
||||
var s []string
|
||||
s = append(s, fmt.Sprintf("%q", containerName))
|
||||
data, ok := containers[containerName]
|
||||
for _, p := range percentiles {
|
||||
value := "N/A"
|
||||
if ok {
|
||||
value = fmt.Sprintf("%.3f", data[p])
|
||||
}
|
||||
s = append(s, value)
|
||||
}
|
||||
fmt.Fprintf(w, "%s\n", strings.Join(s, "\t"))
|
||||
}
|
||||
w.Flush()
|
||||
summaryStrings = append(summaryStrings, fmt.Sprintf("CPU usage of containers on node %q\n:%s", nodeName, buf.String()))
|
||||
}
|
||||
return strings.Join(summaryStrings, "\n")
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) LogCPUSummary() {
|
||||
summary := r.GetCPUSummary()
|
||||
Logf("%s", r.FormatCPUSummary(summary))
|
||||
}
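
// Illustrative sketch (not part of the upstream file): a typical monitoring
// window - start polling, run the workload, then log the latest usage and the
// CPU percentiles before stopping. The 10s polling interval is a placeholder.
func exampleMonitorResourceUsage(c clientset.Interface, runWorkload func()) {
	rm := NewResourceMonitor(c, TargetContainers(), 10*time.Second)
	rm.Start()
	defer rm.Stop()
	runWorkload()
	rm.LogLatest()
	rm.LogCPUSummary()
}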
|
||||
|
||||
func (r *ResourceMonitor) GetCPUSummary() NodesCPUSummary {
|
||||
result := make(NodesCPUSummary)
|
||||
for nodeName, collector := range r.collectors {
|
||||
result[nodeName] = make(ContainersCPUSummary)
|
||||
for _, containerName := range TargetContainers() {
|
||||
data := collector.GetBasicCPUStats(containerName)
|
||||
result[nodeName][containerName] = data
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) GetMasterNodeCPUSummary(summaryPerNode NodesCPUSummary) NodesCPUSummary {
|
||||
result := make(NodesCPUSummary)
|
||||
var masterSummary ContainersCPUSummary
|
||||
var nodesSummaries []ContainersCPUSummary
|
||||
for node, summary := range summaryPerNode {
|
||||
if strings.HasSuffix(node, "master") {
|
||||
masterSummary = summary
|
||||
} else {
|
||||
nodesSummaries = append(nodesSummaries, summary)
|
||||
}
|
||||
}
|
||||
|
||||
nodeAvgSummary := make(ContainersCPUSummary)
|
||||
for _, nodeSummary := range nodesSummaries {
|
||||
for c, summary := range nodeSummary {
|
||||
if _, found := nodeAvgSummary[c]; !found {
|
||||
nodeAvgSummary[c] = map[float64]float64{}
|
||||
}
|
||||
for perc, value := range summary {
|
||||
nodeAvgSummary[c][perc] += value
|
||||
}
|
||||
}
|
||||
}
|
||||
for c := range nodeAvgSummary {
|
||||
for perc := range nodeAvgSummary[c] {
|
||||
nodeAvgSummary[c][perc] /= float64(len(nodesSummaries))
|
||||
}
|
||||
}
|
||||
result["master"] = masterSummary
|
||||
result["node"] = nodeAvgSummary
|
||||
return result
|
||||
}
|
277
vendor/k8s.io/kubernetes/test/e2e/framework/log_size_monitoring.go
generated
vendored
Normal file
@ -0,0 +1,277 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
const (
|
||||
// Minimal period between polling log sizes from components
|
||||
pollingPeriod = 60 * time.Second
|
||||
workersNo = 5
|
||||
kubeletLogsPath = "/var/log/kubelet.log"
|
||||
kubeProxyLogsPath = "/var/log/kube-proxy.log"
|
||||
kubeAddonsLogsPath = "/var/log/kube-addons.log"
|
||||
kubeMasterAddonsLogsPath = "/var/log/kube-master-addons.log"
|
||||
apiServerLogsPath = "/var/log/kube-apiserver.log"
|
||||
controllersLogsPath = "/var/log/kube-controller-manager.log"
|
||||
schedulerLogsPath = "/var/log/kube-scheduler.log"
|
||||
)
|
||||
|
||||
var (
|
||||
nodeLogsToCheck = []string{kubeletLogsPath, kubeProxyLogsPath}
|
||||
masterLogsToCheck = []string{kubeletLogsPath, kubeAddonsLogsPath, kubeMasterAddonsLogsPath,
|
||||
apiServerLogsPath, controllersLogsPath, schedulerLogsPath}
|
||||
)
|
||||
|
||||
// TimestampedSize contains a size together with a time of measurement.
|
||||
type TimestampedSize struct {
|
||||
timestamp time.Time
|
||||
size int
|
||||
}
|
||||
|
||||
// LogSizeGatherer is a worker which grabs a WorkItem from the channel and does assigned work.
|
||||
type LogSizeGatherer struct {
|
||||
stopChannel chan bool
|
||||
data *LogsSizeData
|
||||
wg *sync.WaitGroup
|
||||
workChannel chan WorkItem
|
||||
}
|
||||
|
||||
// LogsSizeVerifier gathers data about log file sizes from master and node machines.
// It oversees <workersNo> workers which do the gathering.
|
||||
type LogsSizeVerifier struct {
|
||||
client clientset.Interface
|
||||
stopChannel chan bool
|
||||
// data stores LogSizeData grouped per IP and log_path
|
||||
data *LogsSizeData
|
||||
masterAddress string
|
||||
nodeAddresses []string
|
||||
wg sync.WaitGroup
|
||||
workChannel chan WorkItem
|
||||
workers []*LogSizeGatherer
|
||||
}
|
||||
|
||||
type SingleLogSummary struct {
|
||||
AverageGenerationRate int
|
||||
NumberOfProbes int
|
||||
}
|
||||
|
||||
type LogSizeDataTimeseries map[string]map[string][]TimestampedSize
|
||||
|
||||
// node -> file -> data
|
||||
type LogsSizeDataSummary map[string]map[string]SingleLogSummary
|
||||
|
||||
// TODO: make sure that we don't need locking here
|
||||
func (s *LogsSizeDataSummary) PrintHumanReadable() string {
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
|
||||
fmt.Fprintf(w, "host\tlog_file\taverage_rate (B/s)\tnumber_of_probes\n")
|
||||
for k, v := range *s {
|
||||
fmt.Fprintf(w, "%v\t\t\t\n", k)
|
||||
for path, data := range v {
|
||||
fmt.Fprintf(w, "\t%v\t%v\t%v\n", path, data.AverageGenerationRate, data.NumberOfProbes)
|
||||
}
|
||||
}
|
||||
w.Flush()
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (s *LogsSizeDataSummary) PrintJSON() string {
|
||||
return PrettyPrintJSON(*s)
|
||||
}
|
||||
|
||||
func (s *LogsSizeDataSummary) SummaryKind() string {
|
||||
return "LogSizeSummary"
|
||||
}
|
||||
|
||||
type LogsSizeData struct {
|
||||
data LogSizeDataTimeseries
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
// WorkItem is a command for a worker that contains the IP of a machine from which we want to
// gather data and the paths to all files we're interested in.
|
||||
type WorkItem struct {
|
||||
ip string
|
||||
paths []string
|
||||
backoffMultiplier int
|
||||
}
|
||||
|
||||
func prepareData(masterAddress string, nodeAddresses []string) *LogsSizeData {
|
||||
data := make(LogSizeDataTimeseries)
|
||||
ips := append(nodeAddresses, masterAddress)
|
||||
for _, ip := range ips {
|
||||
data[ip] = make(map[string][]TimestampedSize)
|
||||
}
|
||||
return &LogsSizeData{
|
||||
data: data,
|
||||
lock: sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *LogsSizeData) AddNewData(ip, path string, timestamp time.Time, size int) {
|
||||
d.lock.Lock()
|
||||
defer d.lock.Unlock()
|
||||
d.data[ip][path] = append(
|
||||
d.data[ip][path],
|
||||
TimestampedSize{
|
||||
timestamp: timestamp,
|
||||
size: size,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed
|
||||
func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
|
||||
nodeAddresses, err := NodeSSHHosts(c)
|
||||
ExpectNoError(err)
|
||||
masterAddress := GetMasterHost() + ":22"
|
||||
|
||||
workChannel := make(chan WorkItem, len(nodeAddresses)+1)
|
||||
workers := make([]*LogSizeGatherer, workersNo)
|
||||
|
||||
verifier := &LogsSizeVerifier{
|
||||
client: c,
|
||||
stopChannel: stopChannel,
|
||||
data: prepareData(masterAddress, nodeAddresses),
|
||||
masterAddress: masterAddress,
|
||||
nodeAddresses: nodeAddresses,
|
||||
wg: sync.WaitGroup{},
|
||||
workChannel: workChannel,
|
||||
workers: workers,
|
||||
}
|
||||
verifier.wg.Add(workersNo)
|
||||
for i := 0; i < workersNo; i++ {
|
||||
workers[i] = &LogSizeGatherer{
|
||||
stopChannel: stopChannel,
|
||||
data: verifier.data,
|
||||
wg: &verifier.wg,
|
||||
workChannel: workChannel,
|
||||
}
|
||||
}
|
||||
return verifier
|
||||
}
|
||||
|
||||
// GetSummary returns a summary (average generation rate and number of probes) of the data gathered by LogsSizeVerifier.
|
||||
func (s *LogsSizeVerifier) GetSummary() *LogsSizeDataSummary {
|
||||
result := make(LogsSizeDataSummary)
|
||||
for k, v := range s.data.data {
|
||||
result[k] = make(map[string]SingleLogSummary)
|
||||
for path, data := range v {
|
||||
if len(data) > 1 {
|
||||
last := data[len(data)-1]
|
||||
first := data[0]
|
||||
rate := (last.size - first.size) / int(last.timestamp.Sub(first.timestamp)/time.Second)
|
||||
result[k][path] = SingleLogSummary{
|
||||
AverageGenerationRate: rate,
|
||||
NumberOfProbes: len(data),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return &result
|
||||
}
|
||||
|
||||
// Run starts log size gathering. It starts a goroutine for every worker and then blocks until stopChannel is closed.
|
||||
func (v *LogsSizeVerifier) Run() {
|
||||
v.workChannel <- WorkItem{
|
||||
ip: v.masterAddress,
|
||||
paths: masterLogsToCheck,
|
||||
backoffMultiplier: 1,
|
||||
}
|
||||
for _, node := range v.nodeAddresses {
|
||||
v.workChannel <- WorkItem{
|
||||
ip: node,
|
||||
paths: nodeLogsToCheck,
|
||||
backoffMultiplier: 1,
|
||||
}
|
||||
}
|
||||
for _, worker := range v.workers {
|
||||
go worker.Run()
|
||||
}
|
||||
<-v.stopChannel
|
||||
v.wg.Wait()
|
||||
}
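
// Illustrative sketch (not part of the upstream file): wire up a
// LogsSizeVerifier around a test run. The caller owns the stop channel;
// closing it ends Run and the workers, after which the summary can be read.
func exampleGatherLogSizes(c clientset.Interface, runTest func()) *LogsSizeDataSummary {
	stopCh := make(chan bool)
	verifier := NewLogsVerifier(c, stopCh)
	go verifier.Run()
	runTest()
	close(stopCh)
	return verifier.GetSummary()
}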
|
||||
|
||||
func (g *LogSizeGatherer) Run() {
|
||||
for g.Work() {
|
||||
}
|
||||
}
|
||||
|
||||
func (g *LogSizeGatherer) pushWorkItem(workItem WorkItem) {
|
||||
select {
|
||||
case <-time.After(time.Duration(workItem.backoffMultiplier) * pollingPeriod):
|
||||
g.workChannel <- workItem
|
||||
case <-g.stopChannel:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Work does a single unit of work: tries to take out a WorkItem from the queue, ssh-es into a given machine,
// gathers data, writes it to the shared <data> map, and creates a goroutine which reinserts the work item into
// the queue with a <pollingPeriod> delay. Returns false if the worker should exit.
|
||||
func (g *LogSizeGatherer) Work() bool {
|
||||
var workItem WorkItem
|
||||
select {
|
||||
case <-g.stopChannel:
|
||||
g.wg.Done()
|
||||
return false
|
||||
case workItem = <-g.workChannel:
|
||||
}
|
||||
sshResult, err := SSH(
|
||||
fmt.Sprintf("ls -l %v | awk '{print $9, $5}' | tr '\n' ' '", strings.Join(workItem.paths, " ")),
|
||||
workItem.ip,
|
||||
TestContext.Provider,
|
||||
)
|
||||
if err != nil {
|
||||
Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
|
||||
// In case of repeated error give up.
|
||||
if workItem.backoffMultiplier >= 128 {
|
||||
Logf("Failed to ssh to a node %v multiple times in a row. Giving up.", workItem.ip)
|
||||
g.wg.Done()
|
||||
return false
|
||||
}
|
||||
workItem.backoffMultiplier *= 2
|
||||
go g.pushWorkItem(workItem)
|
||||
return true
|
||||
}
|
||||
workItem.backoffMultiplier = 1
|
||||
results := strings.Split(sshResult.Stdout, " ")
|
||||
|
||||
now := time.Now()
|
||||
for i := 0; i+1 < len(results); i = i + 2 {
|
||||
path := results[i]
|
||||
size, err := strconv.Atoi(results[i+1])
|
||||
if err != nil {
|
||||
Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
|
||||
continue
|
||||
}
|
||||
g.data.AddNewData(workItem.ip, path, now, size)
|
||||
}
|
||||
go g.pushWorkItem(workItem)
|
||||
return true
|
||||
}
|
44
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/BUILD
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"api_server_metrics.go",
|
||||
"cluster_autoscaler_metrics.go",
|
||||
"controller_manager_metrics.go",
|
||||
"generic_metrics.go",
|
||||
"kubelet_metrics.go",
|
||||
"metrics_grabber.go",
|
||||
"scheduler_metrics.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework/metrics",
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/master/ports:go_default_library",
|
||||
"//pkg/util/system:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/prometheus/common/expfmt:go_default_library",
|
||||
"//vendor/github.com/prometheus/common/model:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
44
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/api_server_metrics.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
type ApiServerMetrics Metrics
|
||||
|
||||
func (m *ApiServerMetrics) Equal(o ApiServerMetrics) bool {
|
||||
return (*Metrics)(m).Equal(Metrics(o))
|
||||
}
|
||||
|
||||
func NewApiServerMetrics() ApiServerMetrics {
|
||||
result := NewMetrics()
|
||||
return ApiServerMetrics(result)
|
||||
}
|
||||
|
||||
func parseApiServerMetrics(data string) (ApiServerMetrics, error) {
|
||||
result := NewApiServerMetrics()
|
||||
if err := parseMetrics(data, (*Metrics)(&result)); err != nil {
|
||||
return ApiServerMetrics{}, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (g *MetricsGrabber) getMetricsFromApiServer() (string, error) {
|
||||
rawOutput, err := g.client.CoreV1().RESTClient().Get().RequestURI("/metrics").Do().Raw()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(rawOutput), nil
|
||||
}
|
36
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/cluster_autoscaler_metrics.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
type ClusterAutoscalerMetrics Metrics
|
||||
|
||||
func (m *ClusterAutoscalerMetrics) Equal(o ClusterAutoscalerMetrics) bool {
|
||||
return (*Metrics)(m).Equal(Metrics(o))
|
||||
}
|
||||
|
||||
func NewClusterAutoscalerMetrics() ClusterAutoscalerMetrics {
|
||||
result := NewMetrics()
|
||||
return ClusterAutoscalerMetrics(result)
|
||||
}
|
||||
|
||||
func parseClusterAutoscalerMetrics(data string) (ClusterAutoscalerMetrics, error) {
|
||||
result := NewClusterAutoscalerMetrics()
|
||||
if err := parseMetrics(data, (*Metrics)(&result)); err != nil {
|
||||
return ClusterAutoscalerMetrics{}, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
36
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/controller_manager_metrics.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
type ControllerManagerMetrics Metrics
|
||||
|
||||
func (m *ControllerManagerMetrics) Equal(o ControllerManagerMetrics) bool {
|
||||
return (*Metrics)(m).Equal(Metrics(o))
|
||||
}
|
||||
|
||||
func NewControllerManagerMetrics() ControllerManagerMetrics {
|
||||
result := NewMetrics()
|
||||
return ControllerManagerMetrics(result)
|
||||
}
|
||||
|
||||
func parseControllerManagerMetrics(data string) (ControllerManagerMetrics, error) {
|
||||
result := NewControllerManagerMetrics()
|
||||
if err := parseMetrics(data, (*Metrics)(&result)); err != nil {
|
||||
return ControllerManagerMetrics{}, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
99
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/generic_metrics.go
generated
vendored
Normal file
@ -0,0 +1,99 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/prometheus/common/expfmt"
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
type Metrics map[string]model.Samples
|
||||
|
||||
func (m *Metrics) Equal(o Metrics) bool {
|
||||
leftKeySet := []string{}
|
||||
rightKeySet := []string{}
|
||||
for k := range *m {
|
||||
leftKeySet = append(leftKeySet, k)
|
||||
}
|
||||
for k := range o {
|
||||
rightKeySet = append(rightKeySet, k)
|
||||
}
|
||||
if !reflect.DeepEqual(leftKeySet, rightKeySet) {
|
||||
return false
|
||||
}
|
||||
for _, k := range leftKeySet {
|
||||
if !(*m)[k].Equal(o[k]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func PrintSample(sample *model.Sample) string {
|
||||
buf := make([]string, 0)
|
||||
// Id is a VERY special label. For 'normal' container it's useless, but it's necessary
|
||||
// for 'system' containers (e.g. /docker-daemon, /kubelet, etc.). We know if that's the
|
||||
// case by checking if there's a label "kubernetes_container_name" present. It's hacky
|
||||
// but it works...
|
||||
_, normalContainer := sample.Metric["kubernetes_container_name"]
|
||||
for k, v := range sample.Metric {
|
||||
if strings.HasPrefix(string(k), "__") {
|
||||
continue
|
||||
}
|
||||
|
||||
if string(k) == "id" && normalContainer {
|
||||
continue
|
||||
}
|
||||
buf = append(buf, fmt.Sprintf("%v=%v", string(k), v))
|
||||
}
|
||||
return fmt.Sprintf("[%v] = %v", strings.Join(buf, ","), sample.Value)
|
||||
}
|
||||
|
||||
func NewMetrics() Metrics {
|
||||
result := make(Metrics)
|
||||
return result
|
||||
}
|
||||
|
||||
func parseMetrics(data string, output *Metrics) error {
|
||||
dec := expfmt.NewDecoder(strings.NewReader(data), expfmt.FmtText)
|
||||
decoder := expfmt.SampleDecoder{
|
||||
Dec: dec,
|
||||
Opts: &expfmt.DecodeOptions{},
|
||||
}
|
||||
|
||||
for {
|
||||
var v model.Vector
|
||||
if err := decoder.Decode(&v); err != nil {
|
||||
if err == io.EOF {
|
||||
// Expected loop termination condition.
|
||||
return nil
|
||||
}
|
||||
glog.Warningf("Invalid Decode. Skipping.")
|
||||
continue
|
||||
}
|
||||
for _, metric := range v {
|
||||
name := string(metric.Metric[model.MetricNameLabel])
|
||||
(*output)[name] = append((*output)[name], metric)
|
||||
}
|
||||
}
|
||||
}
|
85
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/kubelet_metrics.go
generated
vendored
Normal file
85
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/kubelet_metrics.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
type KubeletMetrics Metrics
|
||||
|
||||
func (m *KubeletMetrics) Equal(o KubeletMetrics) bool {
|
||||
return (*Metrics)(m).Equal(Metrics(o))
|
||||
}
|
||||
|
||||
func NewKubeletMetrics() KubeletMetrics {
|
||||
result := NewMetrics()
|
||||
return KubeletMetrics(result)
|
||||
}
|
||||
|
||||
// GrabKubeletMetricsWithoutProxy retrieve metrics from the kubelet on the given node using a simple GET over http.
|
||||
// Currently only used in integration tests.
|
||||
func GrabKubeletMetricsWithoutProxy(nodeName string) (KubeletMetrics, error) {
|
||||
metricsEndpoint := "http://%s/metrics"
|
||||
resp, err := http.Get(fmt.Sprintf(metricsEndpoint, nodeName))
|
||||
if err != nil {
|
||||
return KubeletMetrics{}, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return KubeletMetrics{}, err
|
||||
}
|
||||
return parseKubeletMetrics(string(body))
|
||||
}
|
||||
|
||||
func parseKubeletMetrics(data string) (KubeletMetrics, error) {
|
||||
result := NewKubeletMetrics()
|
||||
if err := parseMetrics(data, (*Metrics)(&result)); err != nil {
|
||||
return KubeletMetrics{}, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (g *MetricsGrabber) getMetricsFromNode(nodeName string, kubeletPort int) (string, error) {
|
||||
// There's a problem with timing out during proxy. Wrapping this in a goroutine to prevent deadlock.
|
||||
// Hanging goroutine will be leaked.
|
||||
finished := make(chan struct{})
|
||||
var err error
|
||||
var rawOutput []byte
|
||||
go func() {
|
||||
rawOutput, err = g.client.CoreV1().RESTClient().Get().
|
||||
Resource("nodes").
|
||||
SubResource("proxy").
|
||||
Name(fmt.Sprintf("%v:%v", nodeName, kubeletPort)).
|
||||
Suffix("metrics").
|
||||
Do().Raw()
|
||||
finished <- struct{}{}
|
||||
}()
|
||||
select {
|
||||
case <-time.After(ProxyTimeout):
|
||||
return "", fmt.Errorf("Timed out when waiting for proxy to gather metrics from %v", nodeName)
|
||||
case <-finished:
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(rawOutput), nil
|
||||
}
|
||||
}
|
245
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/metrics_grabber.go
generated
vendored
Normal file
245
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/metrics_grabber.go
generated
vendored
Normal file
@ -0,0 +1,245 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/master/ports"
|
||||
"k8s.io/kubernetes/pkg/util/system"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const (
|
||||
ProxyTimeout = 2 * time.Minute
|
||||
)
|
||||
|
||||
type MetricsCollection struct {
|
||||
ApiServerMetrics ApiServerMetrics
|
||||
ControllerManagerMetrics ControllerManagerMetrics
|
||||
KubeletMetrics map[string]KubeletMetrics
|
||||
SchedulerMetrics SchedulerMetrics
|
||||
ClusterAutoscalerMetrics ClusterAutoscalerMetrics
|
||||
}
|
||||
|
||||
type MetricsGrabber struct {
|
||||
client clientset.Interface
|
||||
externalClient clientset.Interface
|
||||
grabFromApiServer bool
|
||||
grabFromControllerManager bool
|
||||
grabFromKubelets bool
|
||||
grabFromScheduler bool
|
||||
grabFromClusterAutoscaler bool
|
||||
masterName string
|
||||
registeredMaster bool
|
||||
}
|
||||
|
||||
func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool) (*MetricsGrabber, error) {
|
||||
registeredMaster := false
|
||||
masterName := ""
|
||||
nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodeList.Items) < 1 {
|
||||
glog.Warning("Can't find any Nodes in the API server to grab metrics from")
|
||||
}
|
||||
for _, node := range nodeList.Items {
|
||||
if system.IsMasterNode(node.Name) {
|
||||
registeredMaster = true
|
||||
masterName = node.Name
|
||||
break
|
||||
}
|
||||
}
|
||||
if !registeredMaster {
|
||||
scheduler = false
|
||||
controllers = false
|
||||
clusterAutoscaler = ec != nil
|
||||
if clusterAutoscaler {
|
||||
glog.Warningf("Master node is not registered. Grabbing metrics from Scheduler, ControllerManager is disabled.")
|
||||
} else {
|
||||
glog.Warningf("Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.")
|
||||
}
|
||||
}
|
||||
|
||||
return &MetricsGrabber{
|
||||
client: c,
|
||||
externalClient: ec,
|
||||
grabFromApiServer: apiServer,
|
||||
grabFromControllerManager: controllers,
|
||||
grabFromKubelets: kubelets,
|
||||
grabFromScheduler: scheduler,
|
||||
grabFromClusterAutoscaler: clusterAutoscaler,
|
||||
masterName: masterName,
|
||||
registeredMaster: registeredMaster,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// HasRegisteredMaster returns if metrics grabber was able to find a master node
|
||||
func (g *MetricsGrabber) HasRegisteredMaster() bool {
|
||||
return g.registeredMaster
|
||||
}
|
||||
|
||||
func (g *MetricsGrabber) GrabFromKubelet(nodeName string) (KubeletMetrics, error) {
|
||||
nodes, err := g.client.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{api.ObjectNameField: nodeName}.AsSelector().String()})
|
||||
if err != nil {
|
||||
return KubeletMetrics{}, err
|
||||
}
|
||||
if len(nodes.Items) != 1 {
|
||||
return KubeletMetrics{}, fmt.Errorf("Error listing nodes with name %v, got %v", nodeName, nodes.Items)
|
||||
}
|
||||
kubeletPort := nodes.Items[0].Status.DaemonEndpoints.KubeletEndpoint.Port
|
||||
return g.grabFromKubeletInternal(nodeName, int(kubeletPort))
|
||||
}
|
||||
|
||||
func (g *MetricsGrabber) grabFromKubeletInternal(nodeName string, kubeletPort int) (KubeletMetrics, error) {
|
||||
if kubeletPort <= 0 || kubeletPort > 65535 {
|
||||
return KubeletMetrics{}, fmt.Errorf("Invalid Kubelet port %v. Skipping Kubelet's metrics gathering.", kubeletPort)
|
||||
}
|
||||
output, err := g.getMetricsFromNode(nodeName, int(kubeletPort))
|
||||
if err != nil {
|
||||
return KubeletMetrics{}, err
|
||||
}
|
||||
return parseKubeletMetrics(output)
|
||||
}
|
||||
|
||||
func (g *MetricsGrabber) GrabFromScheduler() (SchedulerMetrics, error) {
|
||||
if !g.registeredMaster {
|
||||
return SchedulerMetrics{}, fmt.Errorf("Master's Kubelet is not registered. Skipping Scheduler's metrics gathering.")
|
||||
}
|
||||
output, err := g.getMetricsFromPod(g.client, fmt.Sprintf("%v-%v", "kube-scheduler", g.masterName), metav1.NamespaceSystem, ports.SchedulerPort)
|
||||
if err != nil {
|
||||
return SchedulerMetrics{}, err
|
||||
}
|
||||
return parseSchedulerMetrics(output)
|
||||
}
|
||||
|
||||
func (g *MetricsGrabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error) {
|
||||
if !g.registeredMaster && g.externalClient == nil {
|
||||
return ClusterAutoscalerMetrics{}, fmt.Errorf("Master's Kubelet is not registered. Skipping ClusterAutoscaler's metrics gathering.")
|
||||
}
|
||||
var client clientset.Interface
|
||||
var namespace string
|
||||
if g.externalClient != nil {
|
||||
client = g.externalClient
|
||||
namespace = "kubemark"
|
||||
} else {
|
||||
client = g.client
|
||||
namespace = metav1.NamespaceSystem
|
||||
}
|
||||
output, err := g.getMetricsFromPod(client, "cluster-autoscaler", namespace, 8085)
|
||||
if err != nil {
|
||||
return ClusterAutoscalerMetrics{}, err
|
||||
}
|
||||
return parseClusterAutoscalerMetrics(output)
|
||||
}
|
||||
|
||||
func (g *MetricsGrabber) GrabFromControllerManager() (ControllerManagerMetrics, error) {
|
||||
if !g.registeredMaster {
|
||||
return ControllerManagerMetrics{}, fmt.Errorf("Master's Kubelet is not registered. Skipping ControllerManager's metrics gathering.")
|
||||
}
|
||||
output, err := g.getMetricsFromPod(g.client, fmt.Sprintf("%v-%v", "kube-controller-manager", g.masterName), metav1.NamespaceSystem, ports.ControllerManagerPort)
|
||||
if err != nil {
|
||||
return ControllerManagerMetrics{}, err
|
||||
}
|
||||
return parseControllerManagerMetrics(output)
|
||||
}
|
||||
|
||||
func (g *MetricsGrabber) GrabFromApiServer() (ApiServerMetrics, error) {
|
||||
output, err := g.getMetricsFromApiServer()
|
||||
if err != nil {
|
||||
return ApiServerMetrics{}, nil
|
||||
}
|
||||
return parseApiServerMetrics(output)
|
||||
}
|
||||
|
||||
func (g *MetricsGrabber) Grab() (MetricsCollection, error) {
|
||||
result := MetricsCollection{}
|
||||
var errs []error
|
||||
if g.grabFromApiServer {
|
||||
metrics, err := g.GrabFromApiServer()
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
result.ApiServerMetrics = metrics
|
||||
}
|
||||
}
|
||||
if g.grabFromScheduler {
|
||||
metrics, err := g.GrabFromScheduler()
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
result.SchedulerMetrics = metrics
|
||||
}
|
||||
}
|
||||
if g.grabFromControllerManager {
|
||||
metrics, err := g.GrabFromControllerManager()
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
result.ControllerManagerMetrics = metrics
|
||||
}
|
||||
}
|
||||
if g.grabFromClusterAutoscaler {
|
||||
metrics, err := g.GrabFromClusterAutoscaler()
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
result.ClusterAutoscalerMetrics = metrics
|
||||
}
|
||||
}
|
||||
if g.grabFromKubelets {
|
||||
result.KubeletMetrics = make(map[string]KubeletMetrics)
|
||||
nodes, err := g.client.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
for _, node := range nodes.Items {
|
||||
kubeletPort := node.Status.DaemonEndpoints.KubeletEndpoint.Port
|
||||
metrics, err := g.grabFromKubeletInternal(node.Name, int(kubeletPort))
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
result.KubeletMetrics[node.Name] = metrics
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
return result, fmt.Errorf("Errors while grabbing metrics: %v", errs)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (g *MetricsGrabber) getMetricsFromPod(client clientset.Interface, podName string, namespace string, port int) (string, error) {
|
||||
rawOutput, err := client.CoreV1().RESTClient().Get().
|
||||
Namespace(namespace).
|
||||
Resource("pods").
|
||||
SubResource("proxy").
|
||||
Name(fmt.Sprintf("%v:%v", podName, port)).
|
||||
Suffix("metrics").
|
||||
Do().Raw()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(rawOutput), nil
|
||||
}
|
36
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/scheduler_metrics.go
generated
vendored
Normal file
36
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/scheduler_metrics.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
type SchedulerMetrics Metrics
|
||||
|
||||
func (m *SchedulerMetrics) Equal(o SchedulerMetrics) bool {
|
||||
return (*Metrics)(m).Equal(Metrics(o))
|
||||
}
|
||||
|
||||
func NewSchedulerMetrics() SchedulerMetrics {
|
||||
result := NewMetrics()
|
||||
return SchedulerMetrics(result)
|
||||
}
|
||||
|
||||
func parseSchedulerMetrics(data string) (SchedulerMetrics, error) {
|
||||
result := NewSchedulerMetrics()
|
||||
if err := parseMetrics(data, (*Metrics)(&result)); err != nil {
|
||||
return SchedulerMetrics{}, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
621
vendor/k8s.io/kubernetes/test/e2e/framework/metrics_util.go
generated
vendored
Normal file
621
vendor/k8s.io/kubernetes/test/e2e/framework/metrics_util.go
generated
vendored
Normal file
@ -0,0 +1,621 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/master/ports"
|
||||
"k8s.io/kubernetes/pkg/util/system"
|
||||
"k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
|
||||
"github.com/prometheus/common/expfmt"
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
const (
|
||||
// NodeStartupThreshold is a rough estimate of the time allocated for a pod to start on a node.
|
||||
NodeStartupThreshold = 4 * time.Second
|
||||
|
||||
podStartupThreshold time.Duration = 5 * time.Second
|
||||
// We are setting 1s threshold for apicalls even in small clusters to avoid flakes.
|
||||
// The problem is that if long GC is happening in small clusters (where we have e.g.
|
||||
// 1-core master machines) and tests are pretty short, it may consume significant
|
||||
// portion of CPU and basically stop all the real work.
|
||||
// Increasing threshold to 1s is within our SLO and should solve this problem.
|
||||
apiCallLatencyThreshold time.Duration = 1 * time.Second
|
||||
|
||||
// We use a higher threshold for list apicalls if the cluster is big (i.e having > 500 nodes)
|
||||
// as list response sizes are bigger in general for big clusters. We also use a higher threshold
|
||||
// for list calls at cluster scope (this includes non-namespaced and all-namespaced calls).
|
||||
apiListCallLatencyThreshold time.Duration = 5 * time.Second
|
||||
apiClusterScopeListCallThreshold time.Duration = 10 * time.Second
|
||||
bigClusterNodeCountThreshold = 500
|
||||
|
||||
// Cluster Autoscaler metrics names
|
||||
caFunctionMetric = "cluster_autoscaler_function_duration_seconds_bucket"
|
||||
caFunctionMetricLabel = "function"
|
||||
)
|
||||
|
||||
type MetricsForE2E metrics.MetricsCollection
|
||||
|
||||
func (m *MetricsForE2E) filterMetrics() {
|
||||
interestingApiServerMetrics := make(metrics.ApiServerMetrics)
|
||||
for _, metric := range InterestingApiServerMetrics {
|
||||
interestingApiServerMetrics[metric] = (*m).ApiServerMetrics[metric]
|
||||
}
|
||||
interestingControllerManagerMetrics := make(metrics.ControllerManagerMetrics)
|
||||
for _, metric := range InterestingControllerManagerMetrics {
|
||||
interestingControllerManagerMetrics[metric] = (*m).ControllerManagerMetrics[metric]
|
||||
}
|
||||
interestingClusterAutoscalerMetrics := make(metrics.ClusterAutoscalerMetrics)
|
||||
for _, metric := range InterestingClusterAutoscalerMetrics {
|
||||
interestingClusterAutoscalerMetrics[metric] = (*m).ClusterAutoscalerMetrics[metric]
|
||||
}
|
||||
interestingKubeletMetrics := make(map[string]metrics.KubeletMetrics)
|
||||
for kubelet, grabbed := range (*m).KubeletMetrics {
|
||||
interestingKubeletMetrics[kubelet] = make(metrics.KubeletMetrics)
|
||||
for _, metric := range InterestingKubeletMetrics {
|
||||
interestingKubeletMetrics[kubelet][metric] = grabbed[metric]
|
||||
}
|
||||
}
|
||||
(*m).ApiServerMetrics = interestingApiServerMetrics
|
||||
(*m).ControllerManagerMetrics = interestingControllerManagerMetrics
|
||||
(*m).KubeletMetrics = interestingKubeletMetrics
|
||||
}
|
||||
|
||||
func (m *MetricsForE2E) PrintHumanReadable() string {
|
||||
buf := bytes.Buffer{}
|
||||
for _, interestingMetric := range InterestingApiServerMetrics {
|
||||
buf.WriteString(fmt.Sprintf("For %v:\n", interestingMetric))
|
||||
for _, sample := range (*m).ApiServerMetrics[interestingMetric] {
|
||||
buf.WriteString(fmt.Sprintf("\t%v\n", metrics.PrintSample(sample)))
|
||||
}
|
||||
}
|
||||
for _, interestingMetric := range InterestingControllerManagerMetrics {
|
||||
buf.WriteString(fmt.Sprintf("For %v:\n", interestingMetric))
|
||||
for _, sample := range (*m).ControllerManagerMetrics[interestingMetric] {
|
||||
buf.WriteString(fmt.Sprintf("\t%v\n", metrics.PrintSample(sample)))
|
||||
}
|
||||
}
|
||||
for _, interestingMetric := range InterestingClusterAutoscalerMetrics {
|
||||
buf.WriteString(fmt.Sprintf("For %v:\n", interestingMetric))
|
||||
for _, sample := range (*m).ClusterAutoscalerMetrics[interestingMetric] {
|
||||
buf.WriteString(fmt.Sprintf("\t%v\n", metrics.PrintSample(sample)))
|
||||
}
|
||||
}
|
||||
for kubelet, grabbed := range (*m).KubeletMetrics {
|
||||
buf.WriteString(fmt.Sprintf("For %v:\n", kubelet))
|
||||
for _, interestingMetric := range InterestingKubeletMetrics {
|
||||
buf.WriteString(fmt.Sprintf("\tFor %v:\n", interestingMetric))
|
||||
for _, sample := range grabbed[interestingMetric] {
|
||||
buf.WriteString(fmt.Sprintf("\t\t%v\n", metrics.PrintSample(sample)))
|
||||
}
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (m *MetricsForE2E) PrintJSON() string {
|
||||
m.filterMetrics()
|
||||
return PrettyPrintJSON(m)
|
||||
}
|
||||
|
||||
func (m *MetricsForE2E) SummaryKind() string {
|
||||
return "MetricsForE2E"
|
||||
}
|
||||
|
||||
var InterestingApiServerMetrics = []string{
|
||||
"apiserver_request_count",
|
||||
"apiserver_request_latencies_summary",
|
||||
"etcd_helper_cache_entry_count",
|
||||
"etcd_helper_cache_hit_count",
|
||||
"etcd_helper_cache_miss_count",
|
||||
"etcd_request_cache_add_latencies_summary",
|
||||
"etcd_request_cache_get_latencies_summary",
|
||||
"etcd_request_latencies_summary",
|
||||
}
|
||||
|
||||
var InterestingControllerManagerMetrics = []string{
|
||||
"garbage_collector_attempt_to_delete_queue_latency",
|
||||
"garbage_collector_attempt_to_delete_work_duration",
|
||||
"garbage_collector_attempt_to_orphan_queue_latency",
|
||||
"garbage_collector_attempt_to_orphan_work_duration",
|
||||
"garbage_collector_dirty_processing_latency_microseconds",
|
||||
"garbage_collector_event_processing_latency_microseconds",
|
||||
"garbage_collector_graph_changes_queue_latency",
|
||||
"garbage_collector_graph_changes_work_duration",
|
||||
"garbage_collector_orphan_processing_latency_microseconds",
|
||||
|
||||
"namespace_queue_latency",
|
||||
"namespace_queue_latency_sum",
|
||||
"namespace_queue_latency_count",
|
||||
"namespace_retries",
|
||||
"namespace_work_duration",
|
||||
"namespace_work_duration_sum",
|
||||
"namespace_work_duration_count",
|
||||
}
|
||||
|
||||
var InterestingKubeletMetrics = []string{
|
||||
"kubelet_container_manager_latency_microseconds",
|
||||
"kubelet_docker_errors",
|
||||
"kubelet_docker_operations_latency_microseconds",
|
||||
"kubelet_generate_pod_status_latency_microseconds",
|
||||
"kubelet_pod_start_latency_microseconds",
|
||||
"kubelet_pod_worker_latency_microseconds",
|
||||
"kubelet_pod_worker_start_latency_microseconds",
|
||||
"kubelet_sync_pods_latency_microseconds",
|
||||
}
|
||||
|
||||
var InterestingClusterAutoscalerMetrics = []string{
|
||||
"function_duration_seconds",
|
||||
"errors_total",
|
||||
"evicted_pods_total",
|
||||
}
|
||||
|
||||
// Dashboard metrics
|
||||
type LatencyMetric struct {
|
||||
Perc50 time.Duration `json:"Perc50"`
|
||||
Perc90 time.Duration `json:"Perc90"`
|
||||
Perc99 time.Duration `json:"Perc99"`
|
||||
Perc100 time.Duration `json:"Perc100"`
|
||||
}
|
||||
|
||||
type PodStartupLatency struct {
|
||||
Latency LatencyMetric `json:"latency"`
|
||||
}
|
||||
|
||||
func (l *PodStartupLatency) SummaryKind() string {
|
||||
return "PodStartupLatency"
|
||||
}
|
||||
|
||||
func (l *PodStartupLatency) PrintHumanReadable() string {
|
||||
return PrettyPrintJSON(l)
|
||||
}
|
||||
|
||||
func (l *PodStartupLatency) PrintJSON() string {
|
||||
return PrettyPrintJSON(PodStartupLatencyToPerfData(l))
|
||||
}
|
||||
|
||||
type SchedulingLatency struct {
|
||||
Scheduling LatencyMetric `json:"scheduling"`
|
||||
Binding LatencyMetric `json:"binding"`
|
||||
Total LatencyMetric `json:"total"`
|
||||
}
|
||||
|
||||
func (l *SchedulingLatency) SummaryKind() string {
|
||||
return "SchedulingLatency"
|
||||
}
|
||||
|
||||
func (l *SchedulingLatency) PrintHumanReadable() string {
|
||||
return PrettyPrintJSON(l)
|
||||
}
|
||||
|
||||
func (l *SchedulingLatency) PrintJSON() string {
|
||||
return PrettyPrintJSON(l)
|
||||
}
|
||||
|
||||
type SaturationTime struct {
|
||||
TimeToSaturate time.Duration `json:"timeToSaturate"`
|
||||
NumberOfNodes int `json:"numberOfNodes"`
|
||||
NumberOfPods int `json:"numberOfPods"`
|
||||
Throughput float32 `json:"throughput"`
|
||||
}
|
||||
|
||||
type APICall struct {
|
||||
Resource string `json:"resource"`
|
||||
Subresource string `json:"subresource"`
|
||||
Verb string `json:"verb"`
|
||||
Scope string `json:"scope"`
|
||||
Latency LatencyMetric `json:"latency"`
|
||||
Count int `json:"count"`
|
||||
}
|
||||
|
||||
type APIResponsiveness struct {
|
||||
APICalls []APICall `json:"apicalls"`
|
||||
}
|
||||
|
||||
func (a *APIResponsiveness) SummaryKind() string {
|
||||
return "APIResponsiveness"
|
||||
}
|
||||
|
||||
func (a *APIResponsiveness) PrintHumanReadable() string {
|
||||
return PrettyPrintJSON(a)
|
||||
}
|
||||
|
||||
func (a *APIResponsiveness) PrintJSON() string {
|
||||
return PrettyPrintJSON(ApiCallToPerfData(a))
|
||||
}
|
||||
|
||||
func (a *APIResponsiveness) Len() int { return len(a.APICalls) }
|
||||
func (a *APIResponsiveness) Swap(i, j int) {
|
||||
a.APICalls[i], a.APICalls[j] = a.APICalls[j], a.APICalls[i]
|
||||
}
|
||||
func (a *APIResponsiveness) Less(i, j int) bool {
|
||||
return a.APICalls[i].Latency.Perc99 < a.APICalls[j].Latency.Perc99
|
||||
}
|
||||
|
||||
// Set request latency for a particular quantile in the APICall metric entry (creating one if necessary).
|
||||
// 0 <= quantile <=1 (e.g. 0.95 is 95%tile, 0.5 is median)
|
||||
// Only 0.5, 0.9 and 0.99 quantiles are supported.
|
||||
func (a *APIResponsiveness) addMetricRequestLatency(resource, subresource, verb, scope string, quantile float64, latency time.Duration) {
|
||||
for i, apicall := range a.APICalls {
|
||||
if apicall.Resource == resource && apicall.Subresource == subresource && apicall.Verb == verb && apicall.Scope == scope {
|
||||
a.APICalls[i] = setQuantileAPICall(apicall, quantile, latency)
|
||||
return
|
||||
}
|
||||
}
|
||||
apicall := setQuantileAPICall(APICall{Resource: resource, Subresource: subresource, Verb: verb, Scope: scope}, quantile, latency)
|
||||
a.APICalls = append(a.APICalls, apicall)
|
||||
}
|
||||
|
||||
// 0 <= quantile <=1 (e.g. 0.95 is 95%tile, 0.5 is median)
|
||||
// Only 0.5, 0.9 and 0.99 quantiles are supported.
|
||||
func setQuantileAPICall(apicall APICall, quantile float64, latency time.Duration) APICall {
|
||||
setQuantile(&apicall.Latency, quantile, latency)
|
||||
return apicall
|
||||
}
|
||||
|
||||
// Only 0.5, 0.9 and 0.99 quantiles are supported.
|
||||
func setQuantile(metric *LatencyMetric, quantile float64, latency time.Duration) {
|
||||
switch quantile {
|
||||
case 0.5:
|
||||
metric.Perc50 = latency
|
||||
case 0.9:
|
||||
metric.Perc90 = latency
|
||||
case 0.99:
|
||||
metric.Perc99 = latency
|
||||
}
|
||||
}
|
||||
|
||||
// Add request count to the APICall metric entry (creating one if necessary).
|
||||
func (a *APIResponsiveness) addMetricRequestCount(resource, subresource, verb, scope string, count int) {
|
||||
for i, apicall := range a.APICalls {
|
||||
if apicall.Resource == resource && apicall.Subresource == subresource && apicall.Verb == verb && apicall.Scope == scope {
|
||||
a.APICalls[i].Count += count
|
||||
return
|
||||
}
|
||||
}
|
||||
apicall := APICall{Resource: resource, Subresource: subresource, Verb: verb, Count: count, Scope: scope}
|
||||
a.APICalls = append(a.APICalls, apicall)
|
||||
}
|
||||
|
||||
func readLatencyMetrics(c clientset.Interface) (*APIResponsiveness, error) {
|
||||
var a APIResponsiveness
|
||||
|
||||
body, err := getMetrics(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
samples, err := extractMetricSamples(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ignoredResources := sets.NewString("events")
|
||||
// TODO: figure out why we're getting non-capitalized proxy and fix this.
|
||||
ignoredVerbs := sets.NewString("WATCH", "WATCHLIST", "PROXY", "proxy", "CONNECT")
|
||||
|
||||
for _, sample := range samples {
|
||||
// Example line:
|
||||
// apiserver_request_latencies_summary{resource="namespaces",verb="LIST",quantile="0.99"} 908
|
||||
// apiserver_request_count{resource="pods",verb="LIST",client="kubectl",code="200",contentType="json"} 233
|
||||
if sample.Metric[model.MetricNameLabel] != "apiserver_request_latencies_summary" &&
|
||||
sample.Metric[model.MetricNameLabel] != "apiserver_request_count" {
|
||||
continue
|
||||
}
|
||||
|
||||
resource := string(sample.Metric["resource"])
|
||||
subresource := string(sample.Metric["subresource"])
|
||||
verb := string(sample.Metric["verb"])
|
||||
scope := string(sample.Metric["scope"])
|
||||
if ignoredResources.Has(resource) || ignoredVerbs.Has(verb) {
|
||||
continue
|
||||
}
|
||||
|
||||
switch sample.Metric[model.MetricNameLabel] {
|
||||
case "apiserver_request_latencies_summary":
|
||||
latency := sample.Value
|
||||
quantile, err := strconv.ParseFloat(string(sample.Metric[model.QuantileLabel]), 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
a.addMetricRequestLatency(resource, subresource, verb, scope, quantile, time.Duration(int64(latency))*time.Microsecond)
|
||||
case "apiserver_request_count":
|
||||
count := sample.Value
|
||||
a.addMetricRequestCount(resource, subresource, verb, scope, int(count))
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return &a, err
|
||||
}
|
||||
|
||||
// Prints top five summary metrics for request types with latency and returns
|
||||
// number of such request types above threshold. We use a higher threshold for
|
||||
// list calls if nodeCount is above a given threshold (i.e. cluster is big).
|
||||
func HighLatencyRequests(c clientset.Interface, nodeCount int) (int, *APIResponsiveness, error) {
|
||||
isBigCluster := (nodeCount > bigClusterNodeCountThreshold)
|
||||
metrics, err := readLatencyMetrics(c)
|
||||
if err != nil {
|
||||
return 0, metrics, err
|
||||
}
|
||||
sort.Sort(sort.Reverse(metrics))
|
||||
badMetrics := 0
|
||||
top := 5
|
||||
for i := range metrics.APICalls {
|
||||
latency := metrics.APICalls[i].Latency.Perc99
|
||||
isListCall := (metrics.APICalls[i].Verb == "LIST")
|
||||
isClusterScopedCall := (metrics.APICalls[i].Scope == "cluster")
|
||||
isBad := false
|
||||
latencyThreshold := apiCallLatencyThreshold
|
||||
if isListCall && isBigCluster {
|
||||
latencyThreshold = apiListCallLatencyThreshold
|
||||
if isClusterScopedCall {
|
||||
latencyThreshold = apiClusterScopeListCallThreshold
|
||||
}
|
||||
}
|
||||
if latency > latencyThreshold {
|
||||
isBad = true
|
||||
badMetrics++
|
||||
}
|
||||
if top > 0 || isBad {
|
||||
top--
|
||||
prefix := ""
|
||||
if isBad {
|
||||
prefix = "WARNING "
|
||||
}
|
||||
Logf("%vTop latency metric: %+v", prefix, metrics.APICalls[i])
|
||||
}
|
||||
}
|
||||
return badMetrics, metrics, nil
|
||||
}
|
||||
|
||||
// Verifies whether 50, 90 and 99th percentiles of PodStartupLatency are
|
||||
// within the threshold.
|
||||
func VerifyPodStartupLatency(latency *PodStartupLatency) error {
|
||||
if latency.Latency.Perc50 > podStartupThreshold {
|
||||
return fmt.Errorf("too high pod startup latency 50th percentile: %v", latency.Latency.Perc50)
|
||||
}
|
||||
if latency.Latency.Perc90 > podStartupThreshold {
|
||||
return fmt.Errorf("too high pod startup latency 90th percentile: %v", latency.Latency.Perc90)
|
||||
}
|
||||
if latency.Latency.Perc99 > podStartupThreshold {
|
||||
return fmt.Errorf("too high pod startup latency 99th percentile: %v", latency.Latency.Perc99)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resets latency metrics in apiserver.
|
||||
func ResetMetrics(c clientset.Interface) error {
|
||||
Logf("Resetting latency metrics in apiserver...")
|
||||
body, err := c.CoreV1().RESTClient().Delete().AbsPath("/metrics").DoRaw()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if string(body) != "metrics reset\n" {
|
||||
return fmt.Errorf("Unexpected response: %q", string(body))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieves metrics information.
|
||||
func getMetrics(c clientset.Interface) (string, error) {
|
||||
body, err := c.CoreV1().RESTClient().Get().AbsPath("/metrics").DoRaw()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(body), nil
|
||||
}
|
||||
|
||||
// Retrieves scheduler metrics information.
|
||||
func getSchedulingLatency(c clientset.Interface) (*SchedulingLatency, error) {
|
||||
result := SchedulingLatency{}
|
||||
|
||||
// Check if master Node is registered
|
||||
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
ExpectNoError(err)
|
||||
|
||||
var data string
|
||||
var masterRegistered = false
|
||||
for _, node := range nodes.Items {
|
||||
if system.IsMasterNode(node.Name) {
|
||||
masterRegistered = true
|
||||
}
|
||||
}
|
||||
if masterRegistered {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
|
||||
defer cancel()
|
||||
|
||||
var rawData []byte
|
||||
rawData, err = c.CoreV1().RESTClient().Get().
|
||||
Context(ctx).
|
||||
Namespace(metav1.NamespaceSystem).
|
||||
Resource("pods").
|
||||
Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)).
|
||||
SubResource("proxy").
|
||||
Suffix("metrics").
|
||||
Do().Raw()
|
||||
|
||||
ExpectNoError(err)
|
||||
data = string(rawData)
|
||||
} else {
|
||||
// If master is not registered fall back to old method of using SSH.
|
||||
if TestContext.Provider == "gke" {
|
||||
Logf("Not grabbing scheduler metrics through master SSH: unsupported for gke")
|
||||
return nil, nil
|
||||
}
|
||||
cmd := "curl http://localhost:10251/metrics"
|
||||
sshResult, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
|
||||
if err != nil || sshResult.Code != 0 {
|
||||
return &result, fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
|
||||
}
|
||||
data = sshResult.Stdout
|
||||
}
|
||||
samples, err := extractMetricSamples(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, sample := range samples {
|
||||
var metric *LatencyMetric = nil
|
||||
switch sample.Metric[model.MetricNameLabel] {
|
||||
case "scheduler_scheduling_algorithm_latency_microseconds":
|
||||
metric = &result.Scheduling
|
||||
case "scheduler_binding_latency_microseconds":
|
||||
metric = &result.Binding
|
||||
case "scheduler_e2e_scheduling_latency_microseconds":
|
||||
metric = &result.Total
|
||||
}
|
||||
if metric == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
latency := sample.Value
|
||||
quantile, err := strconv.ParseFloat(string(sample.Metric[model.QuantileLabel]), 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
setQuantile(metric, quantile, time.Duration(int64(latency))*time.Microsecond)
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// Verifies (currently just by logging them) the scheduling latencies.
|
||||
func VerifySchedulerLatency(c clientset.Interface) (*SchedulingLatency, error) {
|
||||
latency, err := getSchedulingLatency(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return latency, nil
|
||||
}
|
||||
|
||||
func PrettyPrintJSON(metrics interface{}) string {
|
||||
output := &bytes.Buffer{}
|
||||
if err := json.NewEncoder(output).Encode(metrics); err != nil {
|
||||
Logf("Error building encoder: %v", err)
|
||||
return ""
|
||||
}
|
||||
formatted := &bytes.Buffer{}
|
||||
if err := json.Indent(formatted, output.Bytes(), "", " "); err != nil {
|
||||
Logf("Error indenting: %v", err)
|
||||
return ""
|
||||
}
|
||||
return string(formatted.Bytes())
|
||||
}
|
||||
|
||||
// extractMetricSamples parses the prometheus metric samples from the input string.
|
||||
func extractMetricSamples(metricsBlob string) ([]*model.Sample, error) {
|
||||
dec := expfmt.NewDecoder(strings.NewReader(metricsBlob), expfmt.FmtText)
|
||||
decoder := expfmt.SampleDecoder{
|
||||
Dec: dec,
|
||||
Opts: &expfmt.DecodeOptions{},
|
||||
}
|
||||
|
||||
var samples []*model.Sample
|
||||
for {
|
||||
var v model.Vector
|
||||
if err := decoder.Decode(&v); err != nil {
|
||||
if err == io.EOF {
|
||||
// Expected loop termination condition.
|
||||
return samples, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
samples = append(samples, v...)
|
||||
}
|
||||
}
|
||||
|
||||
// PodLatencyData encapsulates pod startup latency information.
|
||||
type PodLatencyData struct {
|
||||
// Name of the pod
|
||||
Name string
|
||||
// Node this pod was running on
|
||||
Node string
|
||||
// Latency information related to pod startuptime
|
||||
Latency time.Duration
|
||||
}
|
||||
|
||||
type LatencySlice []PodLatencyData
|
||||
|
||||
func (a LatencySlice) Len() int { return len(a) }
|
||||
func (a LatencySlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a LatencySlice) Less(i, j int) bool { return a[i].Latency < a[j].Latency }
|
||||
|
||||
func ExtractLatencyMetrics(latencies []PodLatencyData) LatencyMetric {
|
||||
length := len(latencies)
|
||||
perc50 := latencies[int(math.Ceil(float64(length*50)/100))-1].Latency
|
||||
perc90 := latencies[int(math.Ceil(float64(length*90)/100))-1].Latency
|
||||
perc99 := latencies[int(math.Ceil(float64(length*99)/100))-1].Latency
|
||||
perc100 := latencies[length-1].Latency
|
||||
return LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99, Perc100: perc100}
|
||||
}
|
||||
|
||||
// LogSuspiciousLatency logs metrics/docker errors from all nodes that had slow startup times
|
||||
// If latencyDataLag is nil then it will be populated from latencyData
|
||||
func LogSuspiciousLatency(latencyData []PodLatencyData, latencyDataLag []PodLatencyData, nodeCount int, c clientset.Interface) {
|
||||
if latencyDataLag == nil {
|
||||
latencyDataLag = latencyData
|
||||
}
|
||||
for _, l := range latencyData {
|
||||
if l.Latency > NodeStartupThreshold {
|
||||
HighLatencyKubeletOperations(c, 1*time.Second, l.Node, Logf)
|
||||
}
|
||||
}
|
||||
Logf("Approx throughput: %v pods/min",
|
||||
float64(nodeCount)/(latencyDataLag[len(latencyDataLag)-1].Latency.Minutes()))
|
||||
}
|
||||
|
||||
func PrintLatencies(latencies []PodLatencyData, header string) {
|
||||
metrics := ExtractLatencyMetrics(latencies)
|
||||
Logf("10%% %s: %v", header, latencies[(len(latencies)*9)/10:])
|
||||
Logf("perc50: %v, perc90: %v, perc99: %v", metrics.Perc50, metrics.Perc90, metrics.Perc99)
|
||||
}
|
||||
|
||||
func (m *MetricsForE2E) computeClusterAutoscalerMetricsDelta(before metrics.MetricsCollection) {
|
||||
if beforeSamples, found := before.ClusterAutoscalerMetrics[caFunctionMetric]; found {
|
||||
if afterSamples, found := m.ClusterAutoscalerMetrics[caFunctionMetric]; found {
|
||||
beforeSamplesMap := make(map[string]*model.Sample)
|
||||
for _, bSample := range beforeSamples {
|
||||
beforeSamplesMap[makeKey(bSample.Metric[caFunctionMetricLabel], bSample.Metric["le"])] = bSample
|
||||
}
|
||||
for _, aSample := range afterSamples {
|
||||
if bSample, found := beforeSamplesMap[makeKey(aSample.Metric[caFunctionMetricLabel], aSample.Metric["le"])]; found {
|
||||
aSample.Value = aSample.Value - bSample.Value
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func makeKey(a, b model.LabelValue) string {
|
||||
return string(a) + "___" + string(b)
|
||||
}
|
977
vendor/k8s.io/kubernetes/test/e2e/framework/networking_utils.go
generated
vendored
Normal file
977
vendor/k8s.io/kubernetes/test/e2e/framework/networking_utils.go
generated
vendored
Normal file
@ -0,0 +1,977 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
utilnet "k8s.io/apimachinery/pkg/util/net"
|
||||
"k8s.io/apimachinery/pkg/util/rand"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
const (
|
||||
EndpointHttpPort = 8080
|
||||
EndpointUdpPort = 8081
|
||||
TestContainerHttpPort = 8080
|
||||
ClusterHttpPort = 80
|
||||
ClusterUdpPort = 90
|
||||
testPodName = "test-container-pod"
|
||||
hostTestPodName = "host-test-container-pod"
|
||||
nodePortServiceName = "node-port-service"
|
||||
sessionAffinityServiceName = "session-affinity-service"
|
||||
// wait time between poll attempts of a Service vip and/or nodePort.
|
||||
// coupled with testTries to produce a net timeout value.
|
||||
hitEndpointRetryDelay = 2 * time.Second
|
||||
// Number of retries to hit a given set of endpoints. Needs to be high
|
||||
// because we verify iptables statistical rr loadbalancing.
|
||||
testTries = 30
|
||||
// Maximum number of pods in a test, to make test work in large clusters.
|
||||
maxNetProxyPodsCount = 10
|
||||
// Number of checks to hit a given set of endpoints when enable session affinity.
|
||||
SessionAffinityChecks = 10
|
||||
)
|
||||
|
||||
var NetexecImageName = imageutils.GetE2EImage(imageutils.Netexec)
|
||||
|
||||
// NewNetworkingTestConfig creates and sets up a new test config helper.
|
||||
func NewNetworkingTestConfig(f *Framework) *NetworkingTestConfig {
|
||||
config := &NetworkingTestConfig{f: f, Namespace: f.Namespace.Name}
|
||||
By(fmt.Sprintf("Performing setup for networking test in namespace %v", config.Namespace))
|
||||
config.setup(getServiceSelector())
|
||||
return config
|
||||
}
|
||||
|
||||
// NewNetworkingTestNodeE2EConfig creates and sets up a new test config helper for Node E2E.
|
||||
func NewCoreNetworkingTestConfig(f *Framework) *NetworkingTestConfig {
|
||||
config := &NetworkingTestConfig{f: f, Namespace: f.Namespace.Name}
|
||||
By(fmt.Sprintf("Performing setup for networking test in namespace %v", config.Namespace))
|
||||
config.setupCore(getServiceSelector())
|
||||
return config
|
||||
}
|
||||
|
||||
func getServiceSelector() map[string]string {
|
||||
By("creating a selector")
|
||||
selectorName := "selector-" + string(uuid.NewUUID())
|
||||
serviceSelector := map[string]string{
|
||||
selectorName: "true",
|
||||
}
|
||||
return serviceSelector
|
||||
}
|
||||
|
||||
// NetworkingTestConfig is a convenience class around some utility methods
|
||||
// for testing kubeproxy/networking/services/endpoints.
|
||||
type NetworkingTestConfig struct {
|
||||
// TestContaienrPod is a test pod running the netexec image. It is capable
|
||||
// of executing tcp/udp requests against ip:port.
|
||||
TestContainerPod *v1.Pod
|
||||
// HostTestContainerPod is a pod running with hostNetworking=true, and the
|
||||
// hostexec image.
|
||||
HostTestContainerPod *v1.Pod
|
||||
// EndpointPods are the pods belonging to the Service created by this
|
||||
// test config. Each invocation of `setup` creates a service with
|
||||
// 1 pod per node running the netexecImage.
|
||||
EndpointPods []*v1.Pod
|
||||
f *Framework
|
||||
podClient *PodClient
|
||||
// NodePortService is a Service with Type=NodePort spanning over all
|
||||
// endpointPods.
|
||||
NodePortService *v1.Service
|
||||
// SessionAffinityService is a Service with SessionAffinity=ClientIP
|
||||
// spanning over all endpointPods.
|
||||
SessionAffinityService *v1.Service
|
||||
// ExternalAddrs is a list of external IPs of nodes in the cluster.
|
||||
ExternalAddrs []string
|
||||
// Nodes is a list of nodes in the cluster.
|
||||
Nodes []v1.Node
|
||||
// MaxTries is the number of retries tolerated for tests run against
|
||||
// endpoints and services created by this config.
|
||||
MaxTries int
|
||||
// The ClusterIP of the Service reated by this test config.
|
||||
ClusterIP string
|
||||
// External ip of first node for use in nodePort testing.
|
||||
NodeIP string
|
||||
// The http/udp nodePorts of the Service.
|
||||
NodeHttpPort int
|
||||
NodeUdpPort int
|
||||
// The kubernetes namespace within which all resources for this
|
||||
// config are created
|
||||
Namespace string
|
||||
}
|
||||
|
||||
func (config *NetworkingTestConfig) DialFromEndpointContainer(protocol, targetIP string, targetPort, maxTries, minTries int, expectedEps sets.String) {
|
||||
config.DialFromContainer(protocol, config.EndpointPods[0].Status.PodIP, targetIP, EndpointHttpPort, targetPort, maxTries, minTries, expectedEps)
|
||||
}
|
||||
|
||||
func (config *NetworkingTestConfig) DialFromTestContainer(protocol, targetIP string, targetPort, maxTries, minTries int, expectedEps sets.String) {
|
||||
config.DialFromContainer(protocol, config.TestContainerPod.Status.PodIP, targetIP, TestContainerHttpPort, targetPort, maxTries, minTries, expectedEps)
|
||||
}
|
||||
|
||||
// diagnoseMissingEndpoints prints debug information about the endpoints that
|
||||
// are NOT in the given list of foundEndpoints. These are the endpoints we
|
||||
// expected a response from.
|
||||
func (config *NetworkingTestConfig) diagnoseMissingEndpoints(foundEndpoints sets.String) {
|
||||
for _, e := range config.EndpointPods {
|
||||
if foundEndpoints.Has(e.Name) {
|
||||
continue
|
||||
}
|
||||
Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name)
|
||||
desc, _ := RunKubectl(
|
||||
"describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace))
|
||||
Logf(desc)
|
||||
}
|
||||
}
|
||||
|
||||
// EndpointHostnames returns a set of hostnames for existing endpoints.
|
||||
func (config *NetworkingTestConfig) EndpointHostnames() sets.String {
|
||||
expectedEps := sets.NewString()
|
||||
for _, p := range config.EndpointPods {
|
||||
expectedEps.Insert(p.Name)
|
||||
}
|
||||
return expectedEps
|
||||
}
|
||||
|
||||
// DialFromContainers executes a curl via kubectl exec in a test container,
|
||||
// which might then translate to a tcp or udp request based on the protocol
|
||||
// argument in the url.
|
||||
// - minTries is the minimum number of curl attempts required before declaring
|
||||
// success. Set to 0 if you'd like to return as soon as all endpoints respond
|
||||
// at least once.
|
||||
// - maxTries is the maximum number of curl attempts. If this many attempts pass
|
||||
// and we don't see all expected endpoints, the test fails.
|
||||
// - expectedEps is the set of endpointnames to wait for. Typically this is also
|
||||
// the hostname reported by each pod in the service through /hostName.
|
||||
// maxTries == minTries will confirm that we see the expected endpoints and no
|
||||
// more for maxTries. Use this if you want to eg: fail a readiness check on a
|
||||
// pod and confirm it doesn't show up as an endpoint.
|
||||
func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, targetIP string, containerHttpPort, targetPort, maxTries, minTries int, expectedEps sets.String) {
|
||||
ipPort := net.JoinHostPort(containerIP, strconv.Itoa(containerHttpPort))
|
||||
// The current versions of curl included in CentOS and RHEL distros
|
||||
// misinterpret square brackets around IPv6 as globbing, so use the -g
|
||||
// argument to disable globbing to handle the IPv6 case.
|
||||
cmd := fmt.Sprintf("curl -g -q -s 'http://%s/dial?request=hostName&protocol=%s&host=%s&port=%d&tries=1'",
|
||||
ipPort,
|
||||
protocol,
|
||||
targetIP,
|
||||
targetPort)
|
||||
|
||||
eps := sets.NewString()
|
||||
|
||||
for i := 0; i < maxTries; i++ {
|
||||
stdout, stderr, err := config.f.ExecShellInPodWithFullOutput(config.HostTestContainerPod.Name, cmd)
|
||||
if err != nil {
|
||||
// A failure to kubectl exec counts as a try, not a hard fail.
|
||||
// Also note that we will keep failing for maxTries in tests where
|
||||
// we confirm unreachability.
|
||||
Logf("Failed to execute %q: %v, stdout: %q, stderr %q", cmd, err, stdout, stderr)
|
||||
} else {
|
||||
var output map[string][]string
|
||||
if err := json.Unmarshal([]byte(stdout), &output); err != nil {
|
||||
Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
|
||||
cmd, config.HostTestContainerPod.Name, stdout, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, hostName := range output["responses"] {
|
||||
trimmed := strings.TrimSpace(hostName)
|
||||
if trimmed != "" {
|
||||
eps.Insert(trimmed)
|
||||
}
|
||||
}
|
||||
}
|
||||
Logf("Waiting for endpoints: %v", expectedEps.Difference(eps))
|
||||
|
||||
// Check against i+1 so we exit if minTries == maxTries.
|
||||
if (eps.Equal(expectedEps) || eps.Len() == 0 && expectedEps.Len() == 0) && i+1 >= minTries {
|
||||
return
|
||||
}
|
||||
// TODO: get rid of this delay #36281
|
||||
time.Sleep(hitEndpointRetryDelay)
|
||||
}
|
||||
|
||||
config.diagnoseMissingEndpoints(eps)
|
||||
Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
|
||||
}
|
||||
|
||||
func (config *NetworkingTestConfig) GetEndpointsFromTestContainer(protocol, targetIP string, targetPort, tries int) (sets.String, error) {
|
||||
return config.GetEndpointsFromContainer(protocol, config.TestContainerPod.Status.PodIP, targetIP, TestContainerHttpPort, targetPort, tries)
|
||||
}
|
||||
|
||||
// GetEndpointsFromContainer executes a curl via kubectl exec in a test container,
|
||||
// which might then translate to a tcp or udp request based on the protocol argument
|
||||
// in the url. It returns all different endpoints from multiple retries.
|
||||
// - tries is the number of curl attempts. If this many attempts pass and
|
||||
// we don't see any endpoints, the test fails.
|
||||
func (config *NetworkingTestConfig) GetEndpointsFromContainer(protocol, containerIP, targetIP string, containerHttpPort, targetPort, tries int) (sets.String, error) {
|
||||
ipPort := net.JoinHostPort(containerIP, strconv.Itoa(containerHttpPort))
|
||||
// The current versions of curl included in CentOS and RHEL distros
|
||||
// misinterpret square brackets around IPv6 as globbing, so use the -g
|
||||
// argument to disable globbing to handle the IPv6 case.
|
||||
cmd := fmt.Sprintf("curl -g -q -s 'http://%s/dial?request=hostName&protocol=%s&host=%s&port=%d&tries=1'",
|
||||
ipPort,
|
||||
protocol,
|
||||
targetIP,
|
||||
targetPort)
|
||||
|
||||
eps := sets.NewString()
|
||||
|
||||
for i := 0; i < tries; i++ {
|
||||
stdout, stderr, err := config.f.ExecShellInPodWithFullOutput(config.HostTestContainerPod.Name, cmd)
|
||||
if err != nil {
|
||||
// A failure to kubectl exec counts as a try, not a hard fail.
|
||||
// Also note that we will keep failing for maxTries in tests where
|
||||
// we confirm unreachability.
|
||||
Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", cmd, err, stdout, stderr)
|
||||
} else {
|
||||
Logf("Tries: %d, in try: %d, stdout: %v, stderr: %v, command run in: %#v", tries, i, stdout, stderr, config.HostTestContainerPod)
|
||||
var output map[string][]string
|
||||
if err := json.Unmarshal([]byte(stdout), &output); err != nil {
|
||||
Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
|
||||
cmd, config.HostTestContainerPod.Name, stdout, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, hostName := range output["responses"] {
|
||||
trimmed := strings.TrimSpace(hostName)
|
||||
if trimmed != "" {
|
||||
eps.Insert(trimmed)
|
||||
}
|
||||
}
|
||||
// TODO: get rid of this delay #36281
|
||||
time.Sleep(hitEndpointRetryDelay)
|
||||
}
|
||||
}
|
||||
return eps, nil
|
||||
}
|
||||
|
||||
// DialFromNode executes a tcp or udp request based on protocol via kubectl exec
|
||||
// in a test container running with host networking.
|
||||
// - minTries is the minimum number of curl attempts required before declaring
|
||||
// success. Set to 0 if you'd like to return as soon as all endpoints respond
|
||||
// at least once.
|
||||
// - maxTries is the maximum number of curl attempts. If this many attempts pass
|
||||
// and we don't see all expected endpoints, the test fails.
|
||||
// maxTries == minTries will confirm that we see the expected endpoints and no
|
||||
// more for maxTries. Use this if you want to eg: fail a readiness check on a
|
||||
// pod and confirm it doesn't show up as an endpoint.
|
||||
func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targetPort, maxTries, minTries int, expectedEps sets.String) {
|
||||
var cmd string
|
||||
if protocol == "udp" {
|
||||
// TODO: It would be enough to pass 1s+epsilon to timeout, but unfortunately
|
||||
// busybox timeout doesn't support non-integer values.
|
||||
cmd = fmt.Sprintf("echo 'hostName' | timeout -t 2 nc -w 1 -u %s %d", targetIP, targetPort)
|
||||
} else {
|
||||
ipPort := net.JoinHostPort(targetIP, strconv.Itoa(targetPort))
|
||||
// The current versions of curl included in CentOS and RHEL distros
|
||||
// misinterpret square brackets around IPv6 as globbing, so use the -g
|
||||
// argument to disable globbing to handle the IPv6 case.
|
||||
cmd = fmt.Sprintf("timeout -t 15 curl -g -q -s --connect-timeout 1 http://%s/hostName", ipPort)
|
||||
}
|
||||
|
||||
// TODO: This simply tells us that we can reach the endpoints. Check that
|
||||
// the probability of hitting a specific endpoint is roughly the same as
|
||||
// hitting any other.
|
||||
eps := sets.NewString()
|
||||
|
||||
filterCmd := fmt.Sprintf("%s | grep -v '^\\s*$'", cmd)
|
||||
for i := 0; i < maxTries; i++ {
|
||||
stdout, stderr, err := config.f.ExecShellInPodWithFullOutput(config.HostTestContainerPod.Name, filterCmd)
|
||||
if err != nil || len(stderr) > 0 {
|
||||
// A failure to exec command counts as a try, not a hard fail.
|
||||
// Also note that we will keep failing for maxTries in tests where
|
||||
// we confirm unreachability.
|
||||
Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", filterCmd, err, stdout, stderr)
|
||||
} else {
|
||||
trimmed := strings.TrimSpace(stdout)
|
||||
if trimmed != "" {
|
||||
eps.Insert(trimmed)
|
||||
}
|
||||
}
|
||||
|
||||
// Check against i+1 so we exit if minTries == maxTries.
|
||||
if eps.Equal(expectedEps) && i+1 >= minTries {
|
||||
Logf("Found all expected endpoints: %+v", eps.List())
|
||||
return
|
||||
}
|
||||
|
||||
Logf("Waiting for %+v endpoints (expected=%+v, actual=%+v)", expectedEps.Difference(eps).List(), expectedEps.List(), eps.List())
|
||||
|
||||
// TODO: get rid of this delay #36281
|
||||
time.Sleep(hitEndpointRetryDelay)
|
||||
}
|
||||
|
||||
config.diagnoseMissingEndpoints(eps)
|
||||
Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
|
||||
}
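// exampleDialFromNode is an illustrative sketch (not part of the upstream file)
// of the minTries/maxTries semantics documented above. It assumes config is a
// fully set-up *NetworkingTestConfig and that "netserver-0" is the hostname of
// the single expected endpoint pod (both names are hypothetical).
func exampleDialFromNode(config *NetworkingTestConfig) {
	// minTries == 0: return as soon as the expected endpoint answers at least once.
	config.DialFromNode("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, 0, sets.NewString("netserver-0"))
	// minTries == maxTries with an empty expected set: spend the whole try budget
	// and confirm that nothing ever answers, i.e. assert unreachability.
	config.DialFromNode("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, config.MaxTries, sets.NewString())
}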
|
||||
|
||||
// GetSelfURL executes a curl against the given path via kubectl exec into a
|
||||
// test container running with host networking, and fails if the output
|
||||
// doesn't match the expected string.
|
||||
func (config *NetworkingTestConfig) GetSelfURL(port int32, path string, expected string) {
|
||||
cmd := fmt.Sprintf("curl -i -q -s --connect-timeout 1 http://localhost:%d%s", port, path)
|
||||
By(fmt.Sprintf("Getting kube-proxy self URL %s", path))
|
||||
config.executeCurlCmd(cmd, expected)
|
||||
}
|
||||
|
||||
// GetSelfURLStatusCode executes a curl against the given path via kubectl exec into a
|
||||
// test container running with host networking, and fails if the returned status
|
||||
// code doesn't match the expected string.
|
||||
func (config *NetworkingTestConfig) GetSelfURLStatusCode(port int32, path string, expected string) {
|
||||
// check status code
|
||||
cmd := fmt.Sprintf("curl -o /dev/null -i -q -s -w %%{http_code} --connect-timeout 1 http://localhost:%d%s", port, path)
|
||||
By(fmt.Sprintf("Checking status code against http://localhost:%d%s", port, path))
|
||||
config.executeCurlCmd(cmd, expected)
|
||||
}
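// exampleCheckKubeProxySelfURLs is an illustrative sketch (not part of the
// upstream file) of the two self-URL helpers. The port numbers are assumptions:
// 10256 is the conventional kube-proxy healthz port and 10249 its metrics/status
// port; adjust them to the cluster under test.
func exampleCheckKubeProxySelfURLs(config *NetworkingTestConfig) {
	// Expect an HTTP 200 from the kube-proxy healthz endpoint.
	config.GetSelfURLStatusCode(10256, "/healthz", "200")
	// Expect the status endpoint body to mention the proxy mode in use.
	config.GetSelfURL(10249, "/proxyMode", "iptables")
}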
|
||||
|
||||
func (config *NetworkingTestConfig) executeCurlCmd(cmd string, expected string) {
|
||||
// These are arbitrary timeouts. The curl command should pass on first try,
|
||||
// unless remote server is starved/bootstrapping/restarting etc.
|
||||
const retryInterval = 1 * time.Second
|
||||
const retryTimeout = 30 * time.Second
|
||||
podName := config.HostTestContainerPod.Name
|
||||
var msg string
|
||||
if pollErr := wait.PollImmediate(retryInterval, retryTimeout, func() (bool, error) {
|
||||
stdout, err := RunHostCmd(config.Namespace, podName, cmd)
|
||||
if err != nil {
|
||||
msg = fmt.Sprintf("failed executing cmd %v in %v/%v: %v", cmd, config.Namespace, podName, err)
|
||||
Logf(msg)
|
||||
return false, nil
|
||||
}
|
||||
if !strings.Contains(stdout, expected) {
|
||||
msg = fmt.Sprintf("successfully executed %v in %v/%v, but output '%v' doesn't contain expected string '%v'", cmd, config.Namespace, podName, stdout, expected)
|
||||
Logf(msg)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}); pollErr != nil {
|
||||
Logf("\nOutput of kubectl describe pod %v/%v:\n", config.Namespace, podName)
|
||||
desc, _ := RunKubectl(
|
||||
"describe", "pod", podName, fmt.Sprintf("--namespace=%v", config.Namespace))
|
||||
Logf("%s", desc)
|
||||
Failf("Timed out in %v: %v", retryTimeout, msg)
|
||||
}
|
||||
}
|
||||
|
||||
func (config *NetworkingTestConfig) createNetShellPodSpec(podName, hostname string) *v1.Pod {
|
||||
probe := &v1.Probe{
|
||||
InitialDelaySeconds: 10,
|
||||
TimeoutSeconds: 30,
|
||||
PeriodSeconds: 10,
|
||||
SuccessThreshold: 1,
|
||||
FailureThreshold: 3,
|
||||
Handler: v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
Path: "/healthz",
|
||||
Port: intstr.IntOrString{IntVal: EndpointHttpPort},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Namespace: config.Namespace,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "webserver",
|
||||
Image: NetexecImageName,
|
||||
ImagePullPolicy: v1.PullIfNotPresent,
|
||||
Command: []string{
|
||||
"/netexec",
|
||||
fmt.Sprintf("--http-port=%d", EndpointHttpPort),
|
||||
fmt.Sprintf("--udp-port=%d", EndpointUdpPort),
|
||||
},
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
Name: "http",
|
||||
ContainerPort: EndpointHttpPort,
|
||||
},
|
||||
{
|
||||
Name: "udp",
|
||||
ContainerPort: EndpointUdpPort,
|
||||
Protocol: v1.ProtocolUDP,
|
||||
},
|
||||
},
|
||||
LivenessProbe: probe,
|
||||
ReadinessProbe: probe,
|
||||
},
|
||||
},
|
||||
NodeSelector: map[string]string{
|
||||
"kubernetes.io/hostname": hostname,
|
||||
},
|
||||
},
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
func (config *NetworkingTestConfig) createTestPodSpec() *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: testPodName,
|
||||
Namespace: config.Namespace,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "webserver",
|
||||
Image: NetexecImageName,
|
||||
ImagePullPolicy: v1.PullIfNotPresent,
|
||||
Command: []string{
|
||||
"/netexec",
|
||||
fmt.Sprintf("--http-port=%d", EndpointHttpPort),
|
||||
fmt.Sprintf("--udp-port=%d", EndpointUdpPort),
|
||||
},
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
Name: "http",
|
||||
ContainerPort: TestContainerHttpPort,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
func (config *NetworkingTestConfig) createNodePortServiceSpec(svcName string, selector map[string]string, enableSessionAffinity bool) *v1.Service {
|
||||
sessionAffinity := v1.ServiceAffinityNone
|
||||
if enableSessionAffinity {
|
||||
sessionAffinity = v1.ServiceAffinityClientIP
|
||||
}
|
||||
return &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: svcName,
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeNodePort,
|
||||
Ports: []v1.ServicePort{
|
||||
{Port: ClusterHttpPort, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(EndpointHttpPort)},
|
||||
{Port: ClusterUdpPort, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt(EndpointUdpPort)},
|
||||
},
|
||||
Selector: selector,
|
||||
SessionAffinity: sessionAffinity,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (config *NetworkingTestConfig) createNodePortService(selector map[string]string) {
|
||||
config.NodePortService = config.createService(config.createNodePortServiceSpec(nodePortServiceName, selector, false))
|
||||
}
|
||||
|
||||
func (config *NetworkingTestConfig) createSessionAffinityService(selector map[string]string) {
|
||||
config.SessionAffinityService = config.createService(config.createNodePortServiceSpec(sessionAffinityServiceName, selector, true))
|
||||
}
|
||||
|
||||
func (config *NetworkingTestConfig) DeleteNodePortService() {
|
||||
err := config.getServiceClient().Delete(config.NodePortService.Name, nil)
|
||||
Expect(err).NotTo(HaveOccurred(), "error while deleting NodePortService. err:%v)", err)
|
||||
time.Sleep(15 * time.Second) // wait for kube-proxy to catch up with the service being deleted.
|
||||
}
|
||||
|
||||
func (config *NetworkingTestConfig) createTestPods() {
|
||||
testContainerPod := config.createTestPodSpec()
|
||||
hostTestContainerPod := NewHostExecPodSpec(config.Namespace, hostTestPodName)
|
||||
|
||||
config.createPod(testContainerPod)
|
||||
config.createPod(hostTestContainerPod)
|
||||
|
||||
ExpectNoError(config.f.WaitForPodRunning(testContainerPod.Name))
|
||||
ExpectNoError(config.f.WaitForPodRunning(hostTestContainerPod.Name))
|
||||
|
||||
var err error
|
||||
config.TestContainerPod, err = config.getPodClient().Get(testContainerPod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err)
|
||||
}
|
||||
|
||||
config.HostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (config *NetworkingTestConfig) createService(serviceSpec *v1.Service) *v1.Service {
|
||||
_, err := config.getServiceClient().Create(serviceSpec)
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
|
||||
|
||||
err = WaitForService(config.f.ClientSet, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second)
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err))
|
||||
|
||||
createdService, err := config.getServiceClient().Get(serviceSpec.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
|
||||
|
||||
return createdService
|
||||
}
|
||||
|
||||
// setupCore sets up the pods and core test config
|
||||
// mainly for simplified node e2e setup
|
||||
func (config *NetworkingTestConfig) setupCore(selector map[string]string) {
|
||||
By("Creating the service pods in kubernetes")
|
||||
podName := "netserver"
|
||||
config.EndpointPods = config.createNetProxyPods(podName, selector)
|
||||
|
||||
By("Creating test pods")
|
||||
config.createTestPods()
|
||||
|
||||
epCount := len(config.EndpointPods)
|
||||
config.MaxTries = epCount*epCount + testTries
|
||||
}
|
||||
|
||||
// setup includes setupCore and also sets up services
|
||||
func (config *NetworkingTestConfig) setup(selector map[string]string) {
|
||||
config.setupCore(selector)
|
||||
|
||||
By("Getting node addresses")
|
||||
ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
|
||||
nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
|
||||
config.ExternalAddrs = NodeAddresses(nodeList, v1.NodeExternalIP)
|
||||
|
||||
SkipUnlessNodeCountIsAtLeast(2)
|
||||
config.Nodes = nodeList.Items
|
||||
|
||||
By("Creating the service on top of the pods in kubernetes")
|
||||
config.createNodePortService(selector)
|
||||
config.createSessionAffinityService(selector)
|
||||
|
||||
for _, p := range config.NodePortService.Spec.Ports {
|
||||
switch p.Protocol {
|
||||
case v1.ProtocolUDP:
|
||||
config.NodeUdpPort = int(p.NodePort)
|
||||
case v1.ProtocolTCP:
|
||||
config.NodeHttpPort = int(p.NodePort)
|
||||
default:
|
||||
continue
|
||||
}
|
||||
}
|
||||
config.ClusterIP = config.NodePortService.Spec.ClusterIP
|
||||
if len(config.ExternalAddrs) != 0 {
|
||||
config.NodeIP = config.ExternalAddrs[0]
|
||||
} else {
|
||||
internalAddrs := NodeAddresses(nodeList, v1.NodeInternalIP)
|
||||
config.NodeIP = internalAddrs[0]
|
||||
}
|
||||
}
|
||||
|
||||
func (config *NetworkingTestConfig) cleanup() {
|
||||
nsClient := config.getNamespacesClient()
|
||||
nsList, err := nsClient.List(metav1.ListOptions{})
|
||||
if err == nil {
|
||||
for _, ns := range nsList.Items {
|
||||
if strings.Contains(ns.Name, config.f.BaseName) && ns.Name != config.Namespace {
|
||||
nsClient.Delete(ns.Name, nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// shuffleNodes copies nodes from the specified slice into a copy in random
|
||||
// order. It returns a new slice.
|
||||
func shuffleNodes(nodes []v1.Node) []v1.Node {
|
||||
shuffled := make([]v1.Node, len(nodes))
|
||||
perm := rand.Perm(len(nodes))
|
||||
for i, j := range perm {
|
||||
shuffled[j] = nodes[i]
|
||||
}
|
||||
return shuffled
|
||||
}
|
||||
|
||||
func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*v1.Pod {
|
||||
ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
|
||||
nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
|
||||
|
||||
// To make this test work reasonably fast in large clusters,
|
||||
// we limit the number of NetProxyPods to no more than
|
||||
// maxNetProxyPodsCount on random nodes.
|
||||
nodes := shuffleNodes(nodeList.Items)
|
||||
if len(nodes) > maxNetProxyPodsCount {
|
||||
nodes = nodes[:maxNetProxyPodsCount]
|
||||
}
|
||||
|
||||
// create pods, one for each node
|
||||
createdPods := make([]*v1.Pod, 0, len(nodes))
|
||||
for i, n := range nodes {
|
||||
podName := fmt.Sprintf("%s-%d", podName, i)
|
||||
hostname, _ := n.Labels["kubernetes.io/hostname"]
|
||||
pod := config.createNetShellPodSpec(podName, hostname)
|
||||
pod.ObjectMeta.Labels = selector
|
||||
createdPod := config.createPod(pod)
|
||||
createdPods = append(createdPods, createdPod)
|
||||
}
|
||||
|
||||
// wait for all of them to be up
|
||||
runningPods := make([]*v1.Pod, 0, len(nodes))
|
||||
for _, p := range createdPods {
|
||||
ExpectNoError(config.f.WaitForPodReady(p.Name))
|
||||
rp, err := config.getPodClient().Get(p.Name, metav1.GetOptions{})
|
||||
ExpectNoError(err)
|
||||
runningPods = append(runningPods, rp)
|
||||
}
|
||||
|
||||
return runningPods
|
||||
}
|
||||
|
||||
func (config *NetworkingTestConfig) DeleteNetProxyPod() {
|
||||
pod := config.EndpointPods[0]
|
||||
config.getPodClient().Delete(pod.Name, metav1.NewDeleteOptions(0))
|
||||
config.EndpointPods = config.EndpointPods[1:]
|
||||
// wait for the pod to be deleted.
|
||||
err := WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
|
||||
if err != nil {
|
||||
Failf("Failed to delete %s pod: %v", pod.Name, err)
|
||||
}
|
||||
// wait for the endpoint to be removed.
|
||||
err = WaitForServiceEndpointsNum(config.f.ClientSet, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout)
|
||||
if err != nil {
|
||||
Failf("Failed to remove endpoint from service: %s", nodePortServiceName)
|
||||
}
|
||||
// wait for kube-proxy to catch up with the pod being deleted.
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
|
||||
func (config *NetworkingTestConfig) createPod(pod *v1.Pod) *v1.Pod {
|
||||
return config.getPodClient().Create(pod)
|
||||
}
|
||||
|
||||
func (config *NetworkingTestConfig) getPodClient() *PodClient {
|
||||
if config.podClient == nil {
|
||||
config.podClient = config.f.PodClient()
|
||||
}
|
||||
return config.podClient
|
||||
}
|
||||
|
||||
func (config *NetworkingTestConfig) getServiceClient() coreclientset.ServiceInterface {
|
||||
return config.f.ClientSet.CoreV1().Services(config.Namespace)
|
||||
}
|
||||
|
||||
func (config *NetworkingTestConfig) getNamespacesClient() coreclientset.NamespaceInterface {
|
||||
return config.f.ClientSet.CoreV1().Namespaces()
|
||||
}
|
||||
|
||||
func CheckReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, namespace, pod, target string) {
|
||||
cmd := fmt.Sprintf("wget -T 5 -qO- %q", target)
|
||||
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
|
||||
_, err := RunHostCmd(namespace, pod, cmd)
|
||||
if expectToBeReachable && err != nil {
|
||||
Logf("Expect target to be reachable. But got err: %v. Retry until timeout", err)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if !expectToBeReachable && err == nil {
|
||||
Logf("Expect target NOT to be reachable. But it is reachable. Retry until timeout")
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
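// exampleCheckReachability is an illustrative sketch (not part of the upstream
// file). The namespace, pod name and target URLs are hypothetical; the exec pod
// must have wget available (e.g. a busybox-based host-exec pod).
func exampleCheckReachability(ns string) {
	// Keep retrying until the target is reachable (or the timeout expires).
	CheckReachabilityFromPod(true, 2*time.Minute, ns, "exec-pod", "http://my-svc:80/")
	// Keep retrying until the target stops being reachable.
	CheckReachabilityFromPod(false, 30*time.Second, ns, "exec-pod", "http://blocked-svc:80/")
}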
|
||||
|
||||
// Does an HTTP GET, but does not reuse TCP connections.
// Reusing a connection could mask problems such as a changed iptables rule that we would otherwise not see.
// This is intended for relatively quick requests (status checks), so we set a short (5 second) timeout.
|
||||
func httpGetNoConnectionPool(url string) (*http.Response, error) {
|
||||
return httpGetNoConnectionPoolTimeout(url, 5*time.Second)
|
||||
}
|
||||
|
||||
func httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Response, error) {
|
||||
tr := utilnet.SetTransportDefaults(&http.Transport{
|
||||
DisableKeepAlives: true,
|
||||
})
|
||||
client := &http.Client{
|
||||
Transport: tr,
|
||||
Timeout: timeout,
|
||||
}
|
||||
|
||||
return client.Get(url)
|
||||
}
|
||||
|
||||
func TestReachableHTTP(ip string, port int, request string, expect string) (bool, error) {
|
||||
return TestReachableHTTPWithContent(ip, port, request, expect, nil)
|
||||
}
|
||||
|
||||
func TestReachableHTTPWithRetriableErrorCodes(ip string, port int, request string, expect string, retriableErrCodes []int) (bool, error) {
|
||||
return TestReachableHTTPWithContentTimeoutWithRetriableErrorCodes(ip, port, request, expect, nil, retriableErrCodes, time.Second*5)
|
||||
}
|
||||
|
||||
func TestReachableHTTPWithContent(ip string, port int, request string, expect string, content *bytes.Buffer) (bool, error) {
|
||||
return TestReachableHTTPWithContentTimeout(ip, port, request, expect, content, 5*time.Second)
|
||||
}
|
||||
|
||||
func TestReachableHTTPWithContentTimeout(ip string, port int, request string, expect string, content *bytes.Buffer, timeout time.Duration) (bool, error) {
|
||||
return TestReachableHTTPWithContentTimeoutWithRetriableErrorCodes(ip, port, request, expect, content, []int{}, timeout)
|
||||
}
|
||||
|
||||
func TestReachableHTTPWithContentTimeoutWithRetriableErrorCodes(ip string, port int, request string, expect string, content *bytes.Buffer, retriableErrCodes []int, timeout time.Duration) (bool, error) {
|
||||
|
||||
ipPort := net.JoinHostPort(ip, strconv.Itoa(port))
|
||||
url := fmt.Sprintf("http://%s%s", ipPort, request)
|
||||
if ip == "" {
|
||||
Failf("Got empty IP for reachability check (%s)", url)
|
||||
return false, nil
|
||||
}
|
||||
if port == 0 {
|
||||
Failf("Got port==0 for reachability check (%s)", url)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
Logf("Testing HTTP reachability of %v", url)
|
||||
|
||||
resp, err := httpGetNoConnectionPoolTimeout(url, timeout)
|
||||
if err != nil {
|
||||
Logf("Got error testing for reachability of %s: %v", url, err)
|
||||
return false, nil
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
Logf("Got error reading response from %s: %v", url, err)
|
||||
return false, nil
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
for _, code := range retriableErrCodes {
|
||||
if resp.StatusCode == code {
|
||||
Logf("Got non-success status %q when trying to access %s, but the error code is retriable", resp.Status, url)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return false, fmt.Errorf("received non-success return status %q trying to access %s; got body: %s",
|
||||
resp.Status, url, string(body))
|
||||
}
|
||||
if !strings.Contains(string(body), expect) {
|
||||
return false, fmt.Errorf("received response body without expected substring %q: %s", expect, string(body))
|
||||
}
|
||||
if content != nil {
|
||||
content.Write(body)
|
||||
}
|
||||
return true, nil
|
||||
}
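// examplePollReachableHTTP is an illustrative sketch (not part of the upstream
// file): poll a node IP/port until its /hostname endpoint answers, treating
// HTTP 503 as retriable while the backend warms up. nodeIP and nodePort are
// assumed to come from the test's node/service discovery.
func examplePollReachableHTTP(nodeIP string, nodePort int) error {
	return wait.PollImmediate(2*time.Second, 2*time.Minute, func() (bool, error) {
		// A non-retriable non-200 status returns an error and aborts the poll early.
		return TestReachableHTTPWithRetriableErrorCodes(nodeIP, nodePort, "/hostname", "", []int{http.StatusServiceUnavailable})
	})
}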
|
||||
|
||||
func TestNotReachableHTTP(ip string, port int) (bool, error) {
|
||||
return TestNotReachableHTTPTimeout(ip, port, 5*time.Second)
|
||||
}
|
||||
|
||||
func TestNotReachableHTTPTimeout(ip string, port int, timeout time.Duration) (bool, error) {
|
||||
ipPort := net.JoinHostPort(ip, strconv.Itoa(port))
|
||||
url := fmt.Sprintf("http://%s", ipPort)
|
||||
if ip == "" {
|
||||
Failf("Got empty IP for non-reachability check (%s)", url)
|
||||
return false, nil
|
||||
}
|
||||
if port == 0 {
|
||||
Failf("Got port==0 for non-reachability check (%s)", url)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
Logf("Testing HTTP non-reachability of %v", url)
|
||||
|
||||
resp, err := httpGetNoConnectionPoolTimeout(url, timeout)
|
||||
if err != nil {
|
||||
Logf("Confirmed that %s is not reachable", url)
|
||||
return true, nil
|
||||
}
|
||||
resp.Body.Close()
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func TestReachableUDP(ip string, port int, request string, expect string) (bool, error) {
|
||||
ipPort := net.JoinHostPort(ip, strconv.Itoa(port))
|
||||
uri := fmt.Sprintf("udp://%s", ipPort)
|
||||
if ip == "" {
|
||||
Failf("Got empty IP for reachability check (%s)", uri)
|
||||
return false, nil
|
||||
}
|
||||
if port == 0 {
|
||||
Failf("Got port==0 for reachability check (%s)", uri)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
Logf("Testing UDP reachability of %v", uri)
|
||||
|
||||
con, err := net.Dial("udp", ipPort)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to dial %s: %v", ipPort, err)
|
||||
}
|
||||
|
||||
_, err = con.Write([]byte(fmt.Sprintf("%s\n", request)))
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to send request: %v", err)
|
||||
}
|
||||
|
||||
buf := make([]byte, len(expect)+1)
|
||||
|
||||
err = con.SetDeadline(time.Now().Add(3 * time.Second))
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to set deadline: %v", err)
|
||||
}
|
||||
|
||||
_, err = con.Read(buf)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if !strings.Contains(string(buf), expect) {
|
||||
return false, fmt.Errorf("Failed to retrieve %q, got %q", expect, string(buf))
|
||||
}
|
||||
|
||||
Logf("Successfully reached %v", uri)
|
||||
return true, nil
|
||||
}
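// exampleUDPEcho is an illustrative sketch (not part of the upstream file).
// It assumes the target is a netexec test server, which echoes "echo <msg>"
// requests back to the sender; nodeIP and udpPort are hypothetical inputs.
func exampleUDPEcho(nodeIP string, udpPort int) {
	if reached, err := TestReachableUDP(nodeIP, udpPort, "echo hello", "hello"); err != nil || !reached {
		Failf("UDP endpoint %v:%v not reachable: %v", nodeIP, udpPort, err)
	}
}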
|
||||
|
||||
func TestNotReachableUDP(ip string, port int, request string) (bool, error) {
|
||||
ipPort := net.JoinHostPort(ip, strconv.Itoa(port))
|
||||
uri := fmt.Sprintf("udp://%s", ipPort)
|
||||
if ip == "" {
|
||||
Failf("Got empty IP for reachability check (%s)", uri)
|
||||
return false, nil
|
||||
}
|
||||
if port == 0 {
|
||||
Failf("Got port==0 for reachability check (%s)", uri)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
Logf("Testing UDP non-reachability of %v", uri)
|
||||
|
||||
con, err := net.Dial("udp", ipPort)
|
||||
if err != nil {
|
||||
Logf("Confirmed that %s is not reachable", uri)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
_, err = con.Write([]byte(fmt.Sprintf("%s\n", request)))
|
||||
if err != nil {
|
||||
Logf("Confirmed that %s is not reachable", uri)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
buf := make([]byte, 1)
|
||||
|
||||
err = con.SetDeadline(time.Now().Add(3 * time.Second))
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to set deadline: %v", err)
|
||||
}
|
||||
|
||||
_, err = con.Read(buf)
|
||||
if err != nil {
|
||||
Logf("Confirmed that %s is not reachable", uri)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func TestHitNodesFromOutside(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String) error {
|
||||
return TestHitNodesFromOutsideWithCount(externalIP, httpPort, timeout, expectedHosts, 1)
|
||||
}
|
||||
|
||||
func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String,
|
||||
countToSucceed int) error {
|
||||
Logf("Waiting up to %v for satisfying expectedHosts for %v times", timeout, countToSucceed)
|
||||
hittedHosts := sets.NewString()
|
||||
count := 0
|
||||
condition := func() (bool, error) {
|
||||
var respBody bytes.Buffer
|
||||
reached, err := TestReachableHTTPWithContentTimeout(externalIP, int(httpPort), "/hostname", "", &respBody,
|
||||
1*time.Second)
|
||||
if err != nil || !reached {
|
||||
return false, nil
|
||||
}
|
||||
hittedHost := strings.TrimSpace(respBody.String())
|
||||
if !expectedHosts.Has(hittedHost) {
|
||||
Logf("Error hitting unexpected host: %v, reset counter: %v", hittedHost, count)
|
||||
count = 0
|
||||
return false, nil
|
||||
}
|
||||
if !hittedHosts.Has(hittedHost) {
|
||||
hittedHosts.Insert(hittedHost)
|
||||
Logf("Missing %+v, got %+v", expectedHosts.Difference(hittedHosts), hittedHosts)
|
||||
}
|
||||
if hittedHosts.Equal(expectedHosts) {
|
||||
count++
|
||||
if count >= countToSucceed {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if err := wait.Poll(time.Second, timeout, condition); err != nil {
|
||||
return fmt.Errorf("error waiting for expectedHosts: %v, hittedHosts: %v, count: %v, expected count: %v",
|
||||
expectedHosts, hittedHosts, count, countToSucceed)
|
||||
}
|
||||
return nil
|
||||
}
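// exampleHitAllNodes is an illustrative sketch (not part of the upstream file):
// require every node behind an external IP to answer /hostname, and require 10
// further successful probes once the full set has been observed. The IP, port
// and host names below are hypothetical.
func exampleHitAllNodes() error {
	expected := sets.NewString("node-a", "node-b", "node-c")
	return TestHitNodesFromOutsideWithCount("203.0.113.10", 80, 5*time.Minute, expected, 10)
}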
|
||||
|
||||
// Blocks outgoing network traffic on 'node'. Then runs testFunc and returns its status.
|
||||
// At the end (even in case of errors), the network traffic is brought back to normal.
|
||||
// This function executes commands on a node so it will work only for some
|
||||
// environments.
|
||||
func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1.Node, testFunc func()) {
|
||||
host := GetNodeExternalIP(node)
|
||||
master := GetMasterAddress(c)
|
||||
By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
|
||||
defer func() {
|
||||
// This code will execute even if setting the iptables rule failed.
|
||||
// It is on purpose because we may have an error even if the new rule
|
||||
// had been inserted. (yes, we could look at the error code and ssh error
|
||||
// separately, but I prefer to stay on the safe side).
|
||||
By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name))
|
||||
UnblockNetwork(host, master)
|
||||
}()
|
||||
|
||||
Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
|
||||
if !WaitForNodeToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
|
||||
Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
|
||||
}
|
||||
BlockNetwork(host, master)
|
||||
|
||||
Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
|
||||
if !WaitForNodeToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) {
|
||||
Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
|
||||
}
|
||||
|
||||
testFunc()
|
||||
// network traffic is unblocked in a deferred function
|
||||
}
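// examplePartitionNode is an illustrative sketch (not part of the upstream
// file): run an assertion while the given node is partitioned from the master.
// The callback body is left to the caller.
func examplePartitionNode(f *Framework, node *v1.Node) {
	TestUnderTemporaryNetworkFailure(f.ClientSet, f.Namespace.Name, node, func() {
		// e.g. verify that pods from the partitioned node are rescheduled,
		// or that the node is marked NotReady within the expected window.
	})
}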
|
344
vendor/k8s.io/kubernetes/test/e2e/framework/nodes_util.go
generated
vendored
Normal file
@ -0,0 +1,344 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
func EtcdUpgrade(target_storage, target_version string) error {
|
||||
switch TestContext.Provider {
|
||||
case "gce":
|
||||
return etcdUpgradeGCE(target_storage, target_version)
|
||||
default:
|
||||
return fmt.Errorf("EtcdUpgrade() is not implemented for provider %s", TestContext.Provider)
|
||||
}
|
||||
}
|
||||
|
||||
func MasterUpgrade(v string) error {
|
||||
switch TestContext.Provider {
|
||||
case "gce":
|
||||
return masterUpgradeGCE(v, false)
|
||||
case "gke":
|
||||
return masterUpgradeGKE(v)
|
||||
case "kubernetes-anywhere":
|
||||
return masterUpgradeKubernetesAnywhere(v)
|
||||
default:
|
||||
return fmt.Errorf("MasterUpgrade() is not implemented for provider %s", TestContext.Provider)
|
||||
}
|
||||
}
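// exampleClusterUpgrade is an illustrative sketch (not part of the upstream
// file). The version string is hypothetical and must match what the
// provider-specific upgrade scripts accept for the cluster under test.
func exampleClusterUpgrade(f *Framework) error {
	if err := MasterUpgrade("1.9.3"); err != nil {
		return err
	}
	// Upgrade the nodes to the same version, keeping the current image type.
	return NodeUpgrade(f, "1.9.3", "")
}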
|
||||
|
||||
func etcdUpgradeGCE(target_storage, target_version string) error {
|
||||
env := append(
|
||||
os.Environ(),
|
||||
"TEST_ETCD_VERSION="+target_version,
|
||||
"STORAGE_BACKEND="+target_storage,
|
||||
"TEST_ETCD_IMAGE=3.1.10")
|
||||
|
||||
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-l", "-M")
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default.
|
||||
func MasterUpgradeGCEWithKubeProxyDaemonSet(v string, enableKubeProxyDaemonSet bool) error {
|
||||
return masterUpgradeGCE(v, enableKubeProxyDaemonSet)
|
||||
}
|
||||
|
||||
// TODO(mrhohn): Remove 'enableKubeProxyDaemonSet' when kube-proxy is run as a DaemonSet by default.
|
||||
func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error {
|
||||
env := append(os.Environ(), fmt.Sprintf("KUBE_PROXY_DAEMONSET=%v", enableKubeProxyDaemonSet))
|
||||
// TODO: Remove these variables when they're no longer needed for downgrades.
|
||||
if TestContext.EtcdUpgradeVersion != "" && TestContext.EtcdUpgradeStorage != "" {
|
||||
env = append(env,
|
||||
"TEST_ETCD_VERSION="+TestContext.EtcdUpgradeVersion,
|
||||
"STORAGE_BACKEND="+TestContext.EtcdUpgradeStorage,
|
||||
"TEST_ETCD_IMAGE=3.1.10")
|
||||
} else {
|
||||
// In e2e tests, we skip the confirmation prompt about
|
||||
// implicit etcd upgrades to simulate the user entering "y".
|
||||
env = append(env, "TEST_ALLOW_IMPLICIT_ETCD_UPGRADE=true")
|
||||
}
|
||||
|
||||
v := "v" + rawV
|
||||
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-M", v)
|
||||
return err
|
||||
}
|
||||
|
||||
func masterUpgradeGKE(v string) error {
|
||||
Logf("Upgrading master to %q", v)
|
||||
_, _, err := RunCmd("gcloud", "container",
|
||||
"clusters",
|
||||
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
|
||||
fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone),
|
||||
"upgrade",
|
||||
TestContext.CloudConfig.Cluster,
|
||||
"--master",
|
||||
fmt.Sprintf("--cluster-version=%s", v),
|
||||
"--quiet")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
waitForSSHTunnels()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func masterUpgradeKubernetesAnywhere(v string) error {
|
||||
Logf("Upgrading master to %q", v)
|
||||
|
||||
kaPath := TestContext.KubernetesAnywherePath
|
||||
originalConfigPath := filepath.Join(kaPath, ".config")
|
||||
backupConfigPath := filepath.Join(kaPath, ".config.bak")
|
||||
updatedConfigPath := filepath.Join(kaPath, fmt.Sprintf(".config-%s", v))
|
||||
|
||||
// modify config with specified k8s version
|
||||
if _, _, err := RunCmd("sed",
|
||||
"-i.bak", // writes original to .config.bak
|
||||
fmt.Sprintf(`s/kubernetes_version=.*$/kubernetes_version=%q/`, v),
|
||||
originalConfigPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
// revert .config.bak to .config
|
||||
if err := os.Rename(backupConfigPath, originalConfigPath); err != nil {
|
||||
Logf("Could not rename %s back to %s", backupConfigPath, originalConfigPath)
|
||||
}
|
||||
}()
|
||||
|
||||
// invoke ka upgrade
|
||||
if _, _, err := RunCmd("make", "-C", TestContext.KubernetesAnywherePath,
|
||||
"WAIT_FOR_KUBECONFIG=y", "upgrade-master"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// move .config to .config.<version>
|
||||
if err := os.Rename(originalConfigPath, updatedConfigPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func NodeUpgrade(f *Framework, v string, img string) error {
|
||||
// Perform the upgrade.
|
||||
var err error
|
||||
switch TestContext.Provider {
|
||||
case "gce":
|
||||
err = nodeUpgradeGCE(v, img, false)
|
||||
case "gke":
|
||||
err = nodeUpgradeGKE(v, img)
|
||||
default:
|
||||
err = fmt.Errorf("NodeUpgrade() is not implemented for provider %s", TestContext.Provider)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for it to complete and validate nodes are healthy.
|
||||
//
|
||||
// TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in
|
||||
// GKE; the operation shouldn't return until they all are.
|
||||
Logf("Waiting up to %v for all nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout)
|
||||
if _, err := CheckNodesReady(f.ClientSet, RestartNodeReadyAgainTimeout, TestContext.CloudConfig.NumNodes); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default.
|
||||
func NodeUpgradeGCEWithKubeProxyDaemonSet(f *Framework, v string, img string, enableKubeProxyDaemonSet bool) error {
|
||||
// Perform the upgrade.
|
||||
if err := nodeUpgradeGCE(v, img, enableKubeProxyDaemonSet); err != nil {
|
||||
return err
|
||||
}
|
||||
// Wait for it to complete and validate nodes are healthy.
|
||||
Logf("Waiting up to %v for all nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout)
|
||||
if _, err := CheckNodesReady(f.ClientSet, RestartNodeReadyAgainTimeout, TestContext.CloudConfig.NumNodes); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO(mrhohn): Remove 'enableKubeProxyDaemonSet' when kube-proxy is run as a DaemonSet by default.
|
||||
func nodeUpgradeGCE(rawV, img string, enableKubeProxyDaemonSet bool) error {
|
||||
v := "v" + rawV
|
||||
env := append(os.Environ(), fmt.Sprintf("KUBE_PROXY_DAEMONSET=%v", enableKubeProxyDaemonSet))
|
||||
if img != "" {
|
||||
env = append(env, "KUBE_NODE_OS_DISTRIBUTION="+img)
|
||||
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-N", "-o", v)
|
||||
return err
|
||||
}
|
||||
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-N", v)
|
||||
return err
|
||||
}
|
||||
|
||||
func nodeUpgradeGKE(v string, img string) error {
|
||||
Logf("Upgrading nodes to version %q and image %q", v, img)
|
||||
args := []string{
|
||||
"container",
|
||||
"clusters",
|
||||
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
|
||||
fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone),
|
||||
"upgrade",
|
||||
TestContext.CloudConfig.Cluster,
|
||||
fmt.Sprintf("--cluster-version=%s", v),
|
||||
"--quiet",
|
||||
}
|
||||
if len(img) > 0 {
|
||||
args = append(args, fmt.Sprintf("--image-type=%s", img))
|
||||
}
|
||||
_, _, err := RunCmd("gcloud", args...)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
waitForSSHTunnels()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckNodesReady waits up to nt for expect nodes accessed by c to be ready,
|
||||
// returning an error if this doesn't happen in time. It returns the names of
|
||||
// nodes it finds.
|
||||
func CheckNodesReady(c clientset.Interface, nt time.Duration, expect int) ([]string, error) {
|
||||
// First, keep getting all of the nodes until we get the number we expect.
|
||||
var nodeList *v1.NodeList
|
||||
var errLast error
|
||||
start := time.Now()
|
||||
found := wait.Poll(Poll, nt, func() (bool, error) {
|
||||
// A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver
|
||||
// knows about all of the nodes. Thus, we retry the list nodes call
|
||||
// until we get the expected number of nodes.
|
||||
nodeList, errLast = c.CoreV1().Nodes().List(metav1.ListOptions{
|
||||
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String()})
|
||||
if errLast != nil {
|
||||
return false, nil
|
||||
}
|
||||
if len(nodeList.Items) != expect {
|
||||
errLast = fmt.Errorf("expected to find %d nodes but found only %d (%v elapsed)",
|
||||
expect, len(nodeList.Items), time.Since(start))
|
||||
Logf("%v", errLast)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}) == nil
|
||||
nodeNames := make([]string, len(nodeList.Items))
|
||||
for i, n := range nodeList.Items {
|
||||
nodeNames[i] = n.ObjectMeta.Name
|
||||
}
|
||||
if !found {
|
||||
return nodeNames, fmt.Errorf("couldn't find %d nodes within %v; last error: %v",
|
||||
expect, nt, errLast)
|
||||
}
|
||||
Logf("Successfully found %d nodes", expect)
|
||||
|
||||
// Next, ensure in parallel that all the nodes are ready. We subtract the
|
||||
// time we spent waiting above.
|
||||
timeout := nt - time.Since(start)
|
||||
result := make(chan bool, len(nodeList.Items))
|
||||
for _, n := range nodeNames {
|
||||
n := n
|
||||
go func() { result <- WaitForNodeToBeReady(c, n, timeout) }()
|
||||
}
|
||||
failed := false
|
||||
for range nodeList.Items {
|
||||
if !<-result {
|
||||
failed = true
|
||||
}
|
||||
}
|
||||
if failed {
|
||||
return nodeNames, fmt.Errorf("at least one node failed to be ready")
|
||||
}
|
||||
return nodeNames, nil
|
||||
}
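// exampleWaitForNodes is an illustrative sketch (not part of the upstream
// file): wait for the configured number of schedulable nodes to be Ready
// again, e.g. after a rolling node upgrade.
func exampleWaitForNodes(f *Framework) {
	names, err := CheckNodesReady(f.ClientSet, RestartNodeReadyAgainTimeout, TestContext.CloudConfig.NumNodes)
	if err != nil {
		Failf("nodes not ready: %v (found %v)", err, names)
	}
}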
|
||||
|
||||
// MigTemplate (GCE-only) returns the name of the MIG template that the
|
||||
// nodes of the cluster use.
|
||||
func MigTemplate() (string, error) {
|
||||
var errLast error
|
||||
var templ string
|
||||
key := "instanceTemplate"
|
||||
if wait.Poll(Poll, SingleCallTimeout, func() (bool, error) {
|
||||
// TODO(mikedanese): make this hit the compute API directly instead of
|
||||
// shelling out to gcloud.
|
||||
// An `instance-groups managed describe` call outputs what we want to stdout.
|
||||
output, _, err := retryCmd("gcloud", "compute", "instance-groups", "managed",
|
||||
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
|
||||
"describe",
|
||||
fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone),
|
||||
TestContext.CloudConfig.NodeInstanceGroup)
|
||||
if err != nil {
|
||||
errLast = fmt.Errorf("gcloud compute instance-groups managed describe call failed with err: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// The 'describe' call probably succeeded; parse the output and try to
|
||||
// find the line that looks like "instanceTemplate: url/to/<templ>" and
|
||||
// return <templ>.
|
||||
if val := ParseKVLines(output, key); len(val) > 0 {
|
||||
url := strings.Split(val, "/")
|
||||
templ = url[len(url)-1]
|
||||
Logf("MIG group %s using template: %s", TestContext.CloudConfig.NodeInstanceGroup, templ)
|
||||
return true, nil
|
||||
}
|
||||
errLast = fmt.Errorf("couldn't find %s in output to get MIG template. Output: %s", key, output)
|
||||
return false, nil
|
||||
}) != nil {
|
||||
return "", fmt.Errorf("MigTemplate() failed with last error: %v", errLast)
|
||||
}
|
||||
return templ, nil
|
||||
}
|
||||
|
||||
func gceUpgradeScript() string {
|
||||
if len(TestContext.GCEUpgradeScript) == 0 {
|
||||
return path.Join(TestContext.RepoRoot, "cluster/gce/upgrade.sh")
|
||||
}
|
||||
return TestContext.GCEUpgradeScript
|
||||
}
|
||||
|
||||
func waitForSSHTunnels() {
|
||||
Logf("Waiting for SSH tunnels to establish")
|
||||
RunKubectl("run", "ssh-tunnel-test",
|
||||
"--image=busybox",
|
||||
"--restart=Never",
|
||||
"--command", "--",
|
||||
"echo", "Hello")
|
||||
defer RunKubectl("delete", "pod", "ssh-tunnel-test")
|
||||
|
||||
// allow up to a minute for new ssh tunnels to establish
|
||||
wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
|
||||
_, err := RunKubectl("logs", "ssh-tunnel-test")
|
||||
return err == nil, nil
|
||||
})
|
||||
}
|
158
vendor/k8s.io/kubernetes/test/e2e/framework/perf_util.go
generated
vendored
Normal file
@ -0,0 +1,158 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/perftype"
|
||||
)
|
||||
|
||||
// TODO(random-liu): Change the tests to actually use PerfData from the beginning instead of
|
||||
// translating one to the other here.
|
||||
|
||||
// currentApiCallMetricsVersion is the current apicall performance metrics version. We should
|
||||
// bump up the version each time we make an incompatible change to the metrics.
|
||||
const currentApiCallMetricsVersion = "v1"
|
||||
|
||||
// ApiCallToPerfData transforms APIResponsiveness to PerfData.
|
||||
func ApiCallToPerfData(apicalls *APIResponsiveness) *perftype.PerfData {
|
||||
perfData := &perftype.PerfData{Version: currentApiCallMetricsVersion}
|
||||
for _, apicall := range apicalls.APICalls {
|
||||
item := perftype.DataItem{
|
||||
Data: map[string]float64{
|
||||
"Perc50": float64(apicall.Latency.Perc50) / 1000000, // us -> ms
|
||||
"Perc90": float64(apicall.Latency.Perc90) / 1000000,
|
||||
"Perc99": float64(apicall.Latency.Perc99) / 1000000,
|
||||
},
|
||||
Unit: "ms",
|
||||
Labels: map[string]string{
|
||||
"Verb": apicall.Verb,
|
||||
"Resource": apicall.Resource,
|
||||
"Subresource": apicall.Subresource,
|
||||
"Scope": apicall.Scope,
|
||||
"Count": fmt.Sprintf("%v", apicall.Count),
|
||||
},
|
||||
}
|
||||
perfData.DataItems = append(perfData.DataItems, item)
|
||||
}
|
||||
return perfData
|
||||
}
|
||||
|
||||
// PodStartupLatencyToPerfData transforms PodStartupLatency to PerfData.
|
||||
func PodStartupLatencyToPerfData(latency *PodStartupLatency) *perftype.PerfData {
|
||||
perfData := &perftype.PerfData{Version: currentApiCallMetricsVersion}
|
||||
item := perftype.DataItem{
|
||||
Data: map[string]float64{
|
||||
"Perc50": float64(latency.Latency.Perc50) / 1000000, // us -> ms
|
||||
"Perc90": float64(latency.Latency.Perc90) / 1000000,
|
||||
"Perc99": float64(latency.Latency.Perc99) / 1000000,
|
||||
"Perc100": float64(latency.Latency.Perc100) / 1000000,
|
||||
},
|
||||
Unit: "ms",
|
||||
Labels: map[string]string{
|
||||
"Metric": "pod_startup",
|
||||
},
|
||||
}
|
||||
perfData.DataItems = append(perfData.DataItems, item)
|
||||
return perfData
|
||||
}
|
||||
|
||||
// CurrentKubeletPerfMetricsVersion is the current kubelet performance metrics
|
||||
// version. This is used by multiple perf-related data structures. We should
|
||||
// bump up the version each time we make an incompatible change to the metrics.
|
||||
const CurrentKubeletPerfMetricsVersion = "v2"
|
||||
|
||||
// ResourceUsageToPerfData transforms ResourceUsagePerNode to PerfData. Notice that this function
|
||||
// only cares about memory usage, because cpu usage information will be extracted from NodesCPUSummary.
|
||||
func ResourceUsageToPerfData(usagePerNode ResourceUsagePerNode) *perftype.PerfData {
|
||||
return ResourceUsageToPerfDataWithLabels(usagePerNode, nil)
|
||||
}
|
||||
|
||||
// CPUUsageToPerfData transforms NodesCPUSummary to PerfData.
|
||||
func CPUUsageToPerfData(usagePerNode NodesCPUSummary) *perftype.PerfData {
|
||||
return CPUUsageToPerfDataWithLabels(usagePerNode, nil)
|
||||
}
|
||||
|
||||
// PrintPerfData prints the perfdata in json format with PerfResultTag prefix.
|
||||
// If an error occurs, nothing will be printed.
|
||||
func PrintPerfData(p *perftype.PerfData) {
|
||||
// Notice that we must make sure the perftype.PerfResultEnd is in a new line.
|
||||
if str := PrettyPrintJSON(p); str != "" {
|
||||
Logf("%s %s\n%s", perftype.PerfResultTag, str, perftype.PerfResultEnd)
|
||||
}
|
||||
}
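// exampleEmitCPUPerfData is an illustrative sketch (not part of the upstream
// file): convert a gathered NodesCPUSummary into PerfData with an extra label
// and emit it in the parseable perf-result format. The "test" label value is
// hypothetical.
func exampleEmitCPUPerfData(cpuSummary NodesCPUSummary) {
	PrintPerfData(CPUUsageToPerfDataWithLabels(cpuSummary, map[string]string{"test": "density"}))
}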
|
||||
|
||||
// ResourceUsageToPerfDataWithLabels transforms ResourceUsagePerNode to PerfData with additional labels.
|
||||
// Notice that this function only cares about memory usage, because cpu usage information will be extracted from NodesCPUSummary.
|
||||
func ResourceUsageToPerfDataWithLabels(usagePerNode ResourceUsagePerNode, labels map[string]string) *perftype.PerfData {
|
||||
items := []perftype.DataItem{}
|
||||
for node, usages := range usagePerNode {
|
||||
for c, usage := range usages {
|
||||
item := perftype.DataItem{
|
||||
Data: map[string]float64{
|
||||
"memory": float64(usage.MemoryUsageInBytes) / (1024 * 1024),
|
||||
"workingset": float64(usage.MemoryWorkingSetInBytes) / (1024 * 1024),
|
||||
"rss": float64(usage.MemoryRSSInBytes) / (1024 * 1024),
|
||||
},
|
||||
Unit: "MB",
|
||||
Labels: map[string]string{
|
||||
"node": node,
|
||||
"container": c,
|
||||
"datatype": "resource",
|
||||
"resource": "memory",
|
||||
},
|
||||
}
|
||||
items = append(items, item)
|
||||
}
|
||||
}
|
||||
return &perftype.PerfData{
|
||||
Version: CurrentKubeletPerfMetricsVersion,
|
||||
DataItems: items,
|
||||
Labels: labels,
|
||||
}
|
||||
}
|
||||
|
||||
// CPUUsageToPerfDataWithLabels transforms NodesCPUSummary to PerfData with additional labels.
|
||||
func CPUUsageToPerfDataWithLabels(usagePerNode NodesCPUSummary, labels map[string]string) *perftype.PerfData {
|
||||
items := []perftype.DataItem{}
|
||||
for node, usages := range usagePerNode {
|
||||
for c, usage := range usages {
|
||||
data := map[string]float64{}
|
||||
for perc, value := range usage {
|
||||
data[fmt.Sprintf("Perc%02.0f", perc*100)] = value * 1000
|
||||
}
|
||||
|
||||
item := perftype.DataItem{
|
||||
Data: data,
|
||||
Unit: "mCPU",
|
||||
Labels: map[string]string{
|
||||
"node": node,
|
||||
"container": c,
|
||||
"datatype": "resource",
|
||||
"resource": "cpu",
|
||||
},
|
||||
}
|
||||
items = append(items, item)
|
||||
}
|
||||
}
|
||||
return &perftype.PerfData{
|
||||
Version: CurrentKubeletPerfMetricsVersion,
|
||||
DataItems: items,
|
||||
Labels: labels,
|
||||
}
|
||||
}
|
261
vendor/k8s.io/kubernetes/test/e2e/framework/pods.go
generated
vendored
Normal file
@ -0,0 +1,261 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
"k8s.io/kubernetes/pkg/kubelet/events"
|
||||
"k8s.io/kubernetes/pkg/kubelet/sysctl"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const DefaultPodDeletionTimeout = 3 * time.Minute
|
||||
|
||||
// ImageWhiteList is the set of images used in the current test suite. It should be initialized in the test suite, and
|
||||
// the images in the white list should be pre-pulled in the test suite. Currently, this is only used by
|
||||
// node e2e test.
|
||||
var ImageWhiteList sets.String
|
||||
|
||||
// Convenience method for getting a pod client interface in the framework's namespace,
|
||||
// possibly applying test-suite specific transformations to the pod spec, e.g. for
|
||||
// node e2e pod scheduling.
|
||||
func (f *Framework) PodClient() *PodClient {
|
||||
return &PodClient{
|
||||
f: f,
|
||||
PodInterface: f.ClientSet.CoreV1().Pods(f.Namespace.Name),
|
||||
}
|
||||
}
|
||||
|
||||
// Convenience method for getting a pod client interface in an alternative namespace,
|
||||
// possibly applying test-suite specific transformations to the pod spec, e.g. for
|
||||
// node e2e pod scheduling.
|
||||
func (f *Framework) PodClientNS(namespace string) *PodClient {
|
||||
return &PodClient{
|
||||
f: f,
|
||||
PodInterface: f.ClientSet.CoreV1().Pods(namespace),
|
||||
}
|
||||
}
|
||||
|
||||
type PodClient struct {
|
||||
f *Framework
|
||||
v1core.PodInterface
|
||||
}
|
||||
|
||||
// Create creates a new pod according to the framework specifications without waiting for it to start.
|
||||
func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
|
||||
c.mungeSpec(pod)
|
||||
p, err := c.PodInterface.Create(pod)
|
||||
ExpectNoError(err, "Error creating Pod")
|
||||
return p
|
||||
}
|
||||
|
||||
// CreateSyncInNamespace creates a new pod according to the framework specifications in the given namespace, and waits for it to start.
|
||||
func (c *PodClient) CreateSyncInNamespace(pod *v1.Pod, namespace string) *v1.Pod {
|
||||
p := c.Create(pod)
|
||||
ExpectNoError(WaitForPodNameRunningInNamespace(c.f.ClientSet, p.Name, namespace))
|
||||
// Get the newest pod after it becomes running, some status may change after pod created, such as pod ip.
|
||||
p, err := c.Get(p.Name, metav1.GetOptions{})
|
||||
ExpectNoError(err)
|
||||
return p
|
||||
}
|
||||
|
||||
// CreateSync creates a new pod according to the framework specifications, and waits for it to start.
|
||||
func (c *PodClient) CreateSync(pod *v1.Pod) *v1.Pod {
|
||||
return c.CreateSyncInNamespace(pod, c.f.Namespace.Name)
|
||||
}
|
||||
|
||||
// CreateBatch creates a batch of pods. All pods are created before any waiting begins.
|
||||
func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
|
||||
ps := make([]*v1.Pod, len(pods))
|
||||
var wg sync.WaitGroup
|
||||
for i, pod := range pods {
|
||||
wg.Add(1)
|
||||
go func(i int, pod *v1.Pod) {
|
||||
defer wg.Done()
|
||||
defer GinkgoRecover()
|
||||
ps[i] = c.CreateSync(pod)
|
||||
}(i, pod)
|
||||
}
|
||||
wg.Wait()
|
||||
return ps
|
||||
}
|
||||
|
||||
// Update updates the pod object. It retries on conflict and fails the test on
// any other error. name is the pod name; updateFn is the function that mutates
// the pod object.
|
||||
func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
|
||||
ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
|
||||
pod, err := c.PodInterface.Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to get pod %q: %v", name, err)
|
||||
}
|
||||
updateFn(pod)
|
||||
_, err = c.PodInterface.Update(pod)
|
||||
if err == nil {
|
||||
Logf("Successfully updated pod %q", name)
|
||||
return true, nil
|
||||
}
|
||||
if errors.IsConflict(err) {
|
||||
Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
|
||||
return false, nil
|
||||
}
|
||||
return false, fmt.Errorf("failed to update pod %q: %v", name, err)
|
||||
}))
|
||||
}
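// exampleUpdatePodLabels is an illustrative sketch (not part of the upstream
// file): a retry-safe mutation of a pod via the PodClient helper. The pod name
// and label key are hypothetical.
func exampleUpdatePodLabels(f *Framework) {
	f.PodClient().Update("test-pod", func(pod *v1.Pod) {
		if pod.Labels == nil {
			pod.Labels = map[string]string{}
		}
		pod.Labels["touched-by-e2e"] = "true"
	})
}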
|
||||
|
||||
// DeleteSync deletes the pod and waits for it to disappear within `timeout`.
// If the pod doesn't disappear before the timeout, it fails the test.
|
||||
func (c *PodClient) DeleteSync(name string, options *metav1.DeleteOptions, timeout time.Duration) {
|
||||
c.DeleteSyncInNamespace(name, c.f.Namespace.Name, options, timeout)
|
||||
}
|
||||
|
||||
// DeleteSyncInNamespace deletes the pod from the given namespace and waits for
// it to disappear within `timeout`. If the pod doesn't disappear before the
// timeout, it fails the test.
|
||||
func (c *PodClient) DeleteSyncInNamespace(name string, namespace string, options *metav1.DeleteOptions, timeout time.Duration) {
|
||||
err := c.Delete(name, options)
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
Failf("Failed to delete pod %q: %v", name, err)
|
||||
}
|
||||
Expect(WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
|
||||
2*time.Second, timeout)).To(Succeed(), "wait for pod %q to disappear", name)
|
||||
}
|
||||
|
||||
// mungeSpec applies test-suite-specific transformations to the pod spec.
|
||||
func (c *PodClient) mungeSpec(pod *v1.Pod) {
|
||||
if !TestContext.NodeE2E {
|
||||
return
|
||||
}
|
||||
|
||||
Expect(pod.Spec.NodeName).To(Or(BeZero(), Equal(TestContext.NodeName)), "Test misconfigured")
|
||||
pod.Spec.NodeName = TestContext.NodeName
|
||||
// Node e2e does not support the default DNSClusterFirst policy. Set
|
||||
// the policy to DNSDefault, which is configured per node.
|
||||
pod.Spec.DNSPolicy = v1.DNSDefault
|
||||
|
||||
// PrepullImages only works for node e2e now. For cluster e2e, image prepull is not enforced,
|
||||
// we should not munge ImagePullPolicy for cluster e2e pods.
|
||||
if !TestContext.PrepullImages {
|
||||
return
|
||||
}
|
||||
// If prepull is enabled, munge the container spec to make sure the images are not pulled
|
||||
// during the test.
|
||||
for i := range pod.Spec.Containers {
|
||||
c := &pod.Spec.Containers[i]
|
||||
if c.ImagePullPolicy == v1.PullAlways {
|
||||
// If the image pull policy is PullAlways, the image doesn't need to be in
|
||||
// the white list or pre-pulled, because the image is expected to be pulled
|
||||
// in the test anyway.
|
||||
continue
|
||||
}
|
||||
// If the image policy is not PullAlways, the image must be in the white list and
|
||||
// pre-pulled.
|
||||
Expect(ImageWhiteList.Has(c.Image)).To(BeTrue(), "Image %q is not in the white list, consider adding it to CommonImageWhiteList in test/e2e/common/util.go or NodeImageWhiteList in test/e2e_node/image_list.go", c.Image)
|
||||
// Do not pull images during the tests because the images in white list should have
|
||||
// been prepulled.
|
||||
c.ImagePullPolicy = v1.PullNever
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(random-liu): Move pod wait function into this file
|
||||
// WaitForSuccess waits for pod to succeed.
|
||||
func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
|
||||
f := c.f
|
||||
Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
|
||||
func(pod *v1.Pod) (bool, error) {
|
||||
switch pod.Status.Phase {
|
||||
case v1.PodFailed:
|
||||
return true, fmt.Errorf("pod %q failed with reason: %q, message: %q", name, pod.Status.Reason, pod.Status.Message)
|
||||
case v1.PodSucceeded:
|
||||
return true, nil
|
||||
default:
|
||||
return false, nil
|
||||
}
|
||||
},
|
||||
)).To(Succeed(), "wait for pod %q to success", name)
|
||||
}
|
||||
|
||||
// WaitForFailure waits for pod to fail.
|
||||
func (c *PodClient) WaitForFailure(name string, timeout time.Duration) {
|
||||
f := c.f
|
||||
Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
|
||||
func(pod *v1.Pod) (bool, error) {
|
||||
switch pod.Status.Phase {
|
||||
case v1.PodFailed:
|
||||
return true, nil
|
||||
case v1.PodSucceeded:
|
||||
return true, fmt.Errorf("pod %q successed with reason: %q, message: %q", name, pod.Status.Reason, pod.Status.Message)
|
||||
default:
|
||||
return false, nil
|
||||
}
|
||||
},
|
||||
)).To(Succeed(), "wait for pod %q to fail", name)
|
||||
}
|
||||
|
||||
// WaitForErrorEventOrSuccess waits for the pod to succeed or for an error event for that pod.
|
||||
func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
|
||||
var ev *v1.Event
|
||||
err := wait.Poll(Poll, PodStartTimeout, func() (bool, error) {
|
||||
evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(legacyscheme.Scheme, pod)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error in listing events: %s", err)
|
||||
}
|
||||
for _, e := range evnts.Items {
|
||||
switch e.Reason {
|
||||
case events.KillingContainer, events.FailedToCreateContainer, sysctl.UnsupportedReason, sysctl.ForbiddenReason:
|
||||
ev = &e
|
||||
return true, nil
|
||||
case events.StartedContainer:
|
||||
return true, nil
|
||||
default:
|
||||
// ignore all other events
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
return ev, err
|
||||
}
|
||||
|
||||
// MatchContainerOutput gets the output of a container and matches it against the expected regexp.
|
||||
func (c *PodClient) MatchContainerOutput(name string, containerName string, expectedRegexp string) error {
|
||||
f := c.f
|
||||
output, err := GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get output for container %q of pod %q", containerName, name)
|
||||
}
|
||||
regex, err := regexp.Compile(expectedRegexp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to compile regexp %q: %v", expectedRegexp, err)
|
||||
}
|
||||
if !regex.MatchString(output) {
|
||||
return fmt.Errorf("failed to match regexp %q in output %q", expectedRegexp, output)
|
||||
}
|
||||
return nil
|
||||
}
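
// A minimal usage sketch (pod and container names are placeholders): verify a finished
// container's log output against a regexp.
//
//	podClient.WaitForSuccess("logger-pod", 2*time.Minute)
//	ExpectNoError(podClient.MatchContainerOutput("logger-pod", "logger", "hello.*world"))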
|
143
vendor/k8s.io/kubernetes/test/e2e/framework/psp_util.go
generated
vendored
Normal file
@ -0,0 +1,143 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
|
||||
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apiserver/pkg/authentication/serviceaccount"
|
||||
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
const (
|
||||
podSecurityPolicyPrivileged = "e2e-test-privileged-psp"
|
||||
)
|
||||
|
||||
var (
|
||||
isPSPEnabledOnce sync.Once
|
||||
isPSPEnabled bool
|
||||
)
|
||||
|
||||
// Creates a PodSecurityPolicy that allows everything.
|
||||
func PrivilegedPSP(name string) *extensionsv1beta1.PodSecurityPolicy {
|
||||
allowPrivilegeEscalation := true
|
||||
return &extensionsv1beta1.PodSecurityPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Annotations: map[string]string{seccomp.AllowedProfilesAnnotationKey: seccomp.AllowAny},
|
||||
},
|
||||
Spec: extensionsv1beta1.PodSecurityPolicySpec{
|
||||
Privileged: true,
|
||||
AllowPrivilegeEscalation: &allowPrivilegeEscalation,
|
||||
AllowedCapabilities: []corev1.Capability{"*"},
|
||||
Volumes: []extensionsv1beta1.FSType{extensionsv1beta1.All},
|
||||
HostNetwork: true,
|
||||
HostPorts: []extensionsv1beta1.HostPortRange{{Min: 0, Max: 65535}},
|
||||
HostIPC: true,
|
||||
HostPID: true,
|
||||
RunAsUser: extensionsv1beta1.RunAsUserStrategyOptions{
|
||||
Rule: extensionsv1beta1.RunAsUserStrategyRunAsAny,
|
||||
},
|
||||
SELinux: extensionsv1beta1.SELinuxStrategyOptions{
|
||||
Rule: extensionsv1beta1.SELinuxStrategyRunAsAny,
|
||||
},
|
||||
SupplementalGroups: extensionsv1beta1.SupplementalGroupsStrategyOptions{
|
||||
Rule: extensionsv1beta1.SupplementalGroupsStrategyRunAsAny,
|
||||
},
|
||||
FSGroup: extensionsv1beta1.FSGroupStrategyOptions{
|
||||
Rule: extensionsv1beta1.FSGroupStrategyRunAsAny,
|
||||
},
|
||||
ReadOnlyRootFilesystem: false,
|
||||
},
|
||||
}
|
||||
}
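
// A minimal usage sketch (the policy name is a placeholder): tests normally create this
// permissive policy once during setup.
//
//	psp := PrivilegedPSP("e2e-example-privileged-psp")
//	psp, err := f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().Create(psp)
//	ExpectNoError(err)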
|
||||
|
||||
func IsPodSecurityPolicyEnabled(f *Framework) bool {
|
||||
isPSPEnabledOnce.Do(func() {
|
||||
psps, err := f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Logf("Error listing PodSecurityPolicies; assuming PodSecurityPolicy is disabled: %v", err)
|
||||
isPSPEnabled = false
|
||||
} else if psps == nil || len(psps.Items) == 0 {
|
||||
Logf("No PodSecurityPolicies found; assuming PodSecurityPolicy is disabled.")
|
||||
isPSPEnabled = false
|
||||
} else {
|
||||
Logf("Found PodSecurityPolicies; assuming PodSecurityPolicy is enabled.")
|
||||
isPSPEnabled = true
|
||||
}
|
||||
})
|
||||
return isPSPEnabled
|
||||
}
|
||||
|
||||
var (
|
||||
privilegedPSPOnce sync.Once
|
||||
)
|
||||
|
||||
func CreatePrivilegedPSPBinding(f *Framework, namespace string) {
|
||||
if !IsPodSecurityPolicyEnabled(f) {
|
||||
return
|
||||
}
|
||||
// Create the privileged PSP & role
|
||||
privilegedPSPOnce.Do(func() {
|
||||
_, err := f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().Get(
|
||||
podSecurityPolicyPrivileged, metav1.GetOptions{})
|
||||
if !apierrs.IsNotFound(err) {
|
||||
// Privileged PSP was already created.
|
||||
ExpectNoError(err, "Failed to get PodSecurityPolicy %s", podSecurityPolicyPrivileged)
|
||||
return
|
||||
}
|
||||
|
||||
psp := PrivilegedPSP(podSecurityPolicyPrivileged)
|
||||
psp, err = f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().Create(psp)
|
||||
ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
|
||||
|
||||
// Create the Role to bind it to the namespace.
|
||||
_, err = f.ClientSet.RbacV1beta1().ClusterRoles().Create(&rbacv1beta1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged},
|
||||
Rules: []rbacv1beta1.PolicyRule{{
|
||||
APIGroups: []string{"extensions"},
|
||||
Resources: []string{"podsecuritypolicies"},
|
||||
ResourceNames: []string{podSecurityPolicyPrivileged},
|
||||
Verbs: []string{"use"},
|
||||
}},
|
||||
})
|
||||
ExpectNoError(err, "Failed to create PSP role")
|
||||
})
|
||||
|
||||
By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s",
|
||||
podSecurityPolicyPrivileged, namespace))
|
||||
BindClusterRoleInNamespace(f.ClientSet.RbacV1beta1(),
|
||||
podSecurityPolicyPrivileged,
|
||||
namespace,
|
||||
rbacv1beta1.Subject{
|
||||
Kind: rbacv1beta1.ServiceAccountKind,
|
||||
Namespace: namespace,
|
||||
Name: "default",
|
||||
})
|
||||
ExpectNoError(WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(),
|
||||
serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged,
|
||||
schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true))
|
||||
}
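
// A minimal usage sketch (assumption): a test that needs privileged pods binds the
// privileged PSP to its namespace's default service account before creating pods.
//
//	CreatePrivilegedPSPBinding(f, f.Namespace.Name)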
|
985
vendor/k8s.io/kubernetes/test/e2e/framework/pv_util.go
generated
vendored
Normal file
@ -0,0 +1,985 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
. "github.com/onsi/ginkgo"
|
||||
"google.golang.org/api/googleapi"
|
||||
"k8s.io/api/core/v1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
|
||||
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
|
||||
)
|
||||
|
||||
const (
|
||||
PDRetryTimeout = 5 * time.Minute
|
||||
PDRetryPollTime = 5 * time.Second
|
||||
VolumeSelectorKey = "e2e-pv-pool"
|
||||
)
|
||||
|
||||
// Map of all PVs used in the multi pv-pvc tests. The key is the PV's name, which is
|
||||
// guaranteed to be unique. The value is {} (empty struct) since we're only interested
|
||||
// in the PV's name and if it is present. We must always Get the pv object before
|
||||
// referencing any of its values, eg its ClaimRef.
|
||||
type pvval struct{}
|
||||
type PVMap map[string]pvval
|
||||
|
||||
// Map of all PVCs used in the multi pv-pvc tests. The key is "namespace/pvc.Name". The
|
||||
// value is {} (empty struct) since we're only interested in the PVC's name and if it is
|
||||
// present. We must always Get the pvc object before referencing any of its values, eg.
|
||||
// its VolumeName.
|
||||
// Note: It's unsafe to add keys to a map while iterating over it. Whether a key added
// during iteration is visited is unpredictable and can result in the same key being iterated over again.
|
||||
type pvcval struct{}
|
||||
type PVCMap map[types.NamespacedName]pvcval
|
||||
|
||||
// PersistentVolumeConfig is consumed by MakePersistentVolume() to generate a PV object
|
||||
// for varying storage options (NFS, ceph, glusterFS, etc.).
|
||||
// (+optional) prebind holds a pre-bound PVC
|
||||
// Example pvSource:
|
||||
// pvSource: api.PersistentVolumeSource{
|
||||
// NFS: &api.NFSVolumeSource{
|
||||
// ...
|
||||
// },
|
||||
// }
|
||||
type PersistentVolumeConfig struct {
|
||||
PVSource v1.PersistentVolumeSource
|
||||
Prebind *v1.PersistentVolumeClaim
|
||||
ReclaimPolicy v1.PersistentVolumeReclaimPolicy
|
||||
NamePrefix string
|
||||
Labels labels.Set
|
||||
StorageClassName string
|
||||
NodeAffinity *v1.NodeAffinity
|
||||
}
|
||||
|
||||
// PersistentVolumeClaimConfig is consumed by MakePersistentVolumeClaim() to generate a PVC object.
|
||||
// AccessModes defaults to all modes (RWO, RWX, ROX) if left empty
|
||||
// (+optional) Annotations defines the PVC's annotations
|
||||
|
||||
type PersistentVolumeClaimConfig struct {
|
||||
AccessModes []v1.PersistentVolumeAccessMode
|
||||
Annotations map[string]string
|
||||
Selector *metav1.LabelSelector
|
||||
StorageClassName *string
|
||||
}
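
// A minimal sketch (server IP, export path and selector value are placeholders) of how
// the two config structs above are typically filled in for an NFS-backed PV/PVC pair.
//
//	pvConfig := PersistentVolumeConfig{
//		NamePrefix: "nfs-",
//		Labels:     labels.Set{VolumeSelectorKey: "nfs"},
//		PVSource: v1.PersistentVolumeSource{
//			NFS: &v1.NFSVolumeSource{Server: "10.0.0.2", Path: "/exports", ReadOnly: false},
//		},
//	}
//	pvcConfig := PersistentVolumeClaimConfig{
//		Selector: &metav1.LabelSelector{
//			MatchLabels: map[string]string{VolumeSelectorKey: "nfs"},
//		},
//	}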
|
||||
|
||||
// Clean up a pv and pvc in a single pv/pvc test case.
|
||||
// Note: delete errors are appended to []error so that we can attempt to delete both the pvc and pv.
|
||||
func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) []error {
|
||||
var errs []error
|
||||
|
||||
if pvc != nil {
|
||||
err := DeletePersistentVolumeClaim(c, pvc.Name, ns)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvc.Name, err))
|
||||
}
|
||||
} else {
|
||||
Logf("pvc is nil")
|
||||
}
|
||||
if pv != nil {
|
||||
err := DeletePersistentVolume(c, pv.Name)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pv.Name, err))
|
||||
}
|
||||
} else {
|
||||
Logf("pv is nil")
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
// Clean up pvs and pvcs in multi-pv-pvc test cases. Entries found in the pv and claim maps are
|
||||
// deleted as long as the Delete api call succeeds.
|
||||
// Note: delete errors are appended to []error so that as many pvcs and pvs as possible are deleted.
|
||||
func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMap) []error {
|
||||
var errs []error
|
||||
|
||||
for pvcKey := range claims {
|
||||
err := DeletePersistentVolumeClaim(c, pvcKey.Name, ns)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvcKey.Name, err))
|
||||
} else {
|
||||
delete(claims, pvcKey)
|
||||
}
|
||||
}
|
||||
|
||||
for pvKey := range pvols {
|
||||
err := DeletePersistentVolume(c, pvKey)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pvKey, err))
|
||||
} else {
|
||||
delete(pvols, pvKey)
|
||||
}
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
// Delete the PV.
|
||||
func DeletePersistentVolume(c clientset.Interface, pvName string) error {
|
||||
if c != nil && len(pvName) > 0 {
|
||||
Logf("Deleting PersistentVolume %q", pvName)
|
||||
err := c.CoreV1().PersistentVolumes().Delete(pvName, nil)
|
||||
if err != nil && !apierrs.IsNotFound(err) {
|
||||
return fmt.Errorf("PV Delete API error: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete the Claim
|
||||
func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) error {
|
||||
if c != nil && len(pvcName) > 0 {
|
||||
Logf("Deleting PersistentVolumeClaim %q", pvcName)
|
||||
err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvcName, nil)
|
||||
if err != nil && !apierrs.IsNotFound(err) {
|
||||
return fmt.Errorf("PVC Delete API error: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete the PVC and wait for the PV to enter its expected phase. Validate that the PV
|
||||
// has been reclaimed (assumption here about reclaimPolicy). Caller tells this func which
|
||||
// phase value to expect for the pv bound to the to-be-deleted claim.
|
||||
func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
|
||||
pvname := pvc.Spec.VolumeName
|
||||
Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
|
||||
err := DeletePersistentVolumeClaim(c, pvc.Name, ns)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for the PV's phase to return to be `expectPVPhase`
|
||||
Logf("Waiting for reclaim process to complete.")
|
||||
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, 1*time.Second, 300*time.Second)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
|
||||
}
|
||||
|
||||
// examine the pv's ClaimRef and UID and compare to expected values
|
||||
pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("PV Get API error: %v", err)
|
||||
}
|
||||
cr := pv.Spec.ClaimRef
|
||||
if expectPVPhase == v1.VolumeAvailable {
|
||||
if cr != nil && len(cr.UID) > 0 {
|
||||
return fmt.Errorf("PV is 'Available' but ClaimRef.UID is not empty")
|
||||
}
|
||||
} else if expectPVPhase == v1.VolumeBound {
|
||||
if cr == nil {
|
||||
return fmt.Errorf("PV is 'Bound' but ClaimRef is nil")
|
||||
}
|
||||
if len(cr.UID) == 0 {
|
||||
return fmt.Errorf("PV is 'Bound' but ClaimRef.UID is empty")
|
||||
}
|
||||
}
|
||||
|
||||
Logf("PV %v now in %q phase", pv.Name, expectPVPhase)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Wraps deletePVCandValidatePV() by calling the function in a loop over the PV map. Only bound PVs
|
||||
// are deleted. Validates that the claim was deleted and the PV is in the expected Phase (Released,
|
||||
// Available, Bound).
|
||||
// Note: if there are more claims than PVs then some of the remaining claims may bind to
// PVs that have just become Available.
|
||||
func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) error {
|
||||
var boundPVs, deletedPVCs int
|
||||
|
||||
for pvName := range pvols {
|
||||
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("PV Get API error: %v", err)
|
||||
}
|
||||
cr := pv.Spec.ClaimRef
|
||||
// if pv is bound then delete the pvc it is bound to
|
||||
if cr != nil && len(cr.Name) > 0 {
|
||||
boundPVs++
|
||||
// Assert bound PVC is tracked in this test. Failing this might
|
||||
// indicate external PVCs interfering with the test.
|
||||
pvcKey := makePvcKey(ns, cr.Name)
|
||||
if _, found := claims[pvcKey]; !found {
|
||||
return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey)
|
||||
}
|
||||
// get the pvc for the delete call below
|
||||
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(cr.Name, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
if err = DeletePVCandValidatePV(c, ns, pvc, pv, expectPVPhase); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if !apierrs.IsNotFound(err) {
|
||||
return fmt.Errorf("PVC Get API error: %v", err)
|
||||
}
|
||||
// delete pvckey from map even if apierrs.IsNotFound above is true and thus the
|
||||
// claim was not actually deleted here
|
||||
delete(claims, pvcKey)
|
||||
deletedPVCs++
|
||||
}
|
||||
}
|
||||
if boundPVs != deletedPVCs {
|
||||
return fmt.Errorf("expect number of bound PVs (%v) to equal number of deleted PVCs (%v)", boundPVs, deletedPVCs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// create the PV resource. Returns the created PV or an API error.
|
||||
func createPV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
|
||||
pv, err := c.CoreV1().PersistentVolumes().Create(pv)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("PV Create API error: %v", err)
|
||||
}
|
||||
return pv, nil
|
||||
}
|
||||
|
||||
// create the PVC resource. Returns the created PVC or an API error.
|
||||
func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
|
||||
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("PVC Create API error: %v", err)
|
||||
}
|
||||
return pvc, nil
|
||||
}
|
||||
|
||||
// Create a PVC followed by the PV based on the passed in nfs-server ip and
|
||||
// namespace. If the "preBind" bool is true then pre-bind the PV to the PVC
|
||||
// via the PV's ClaimRef. Return the pv and pvc to reflect the created objects.
|
||||
// Note: in the pre-bind case the real PVC name, which is generated, is not
|
||||
// known until after the PVC is instantiated. This is why the pvc is created
|
||||
// before the pv.
|
||||
func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
|
||||
// make the pvc spec
|
||||
pvc := MakePersistentVolumeClaim(pvcConfig, ns)
|
||||
preBindMsg := ""
|
||||
if preBind {
|
||||
preBindMsg = " pre-bound"
|
||||
pvConfig.Prebind = pvc
|
||||
}
|
||||
// make the pv spec
|
||||
pv := MakePersistentVolume(pvConfig)
|
||||
|
||||
By(fmt.Sprintf("Creating a PVC followed by a%s PV", preBindMsg))
|
||||
pvc, err := CreatePVC(c, ns, pvc)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// instantiate the pv, handle pre-binding by ClaimRef if needed
|
||||
if preBind {
|
||||
pv.Spec.ClaimRef.Name = pvc.Name
|
||||
}
|
||||
pv, err = createPV(c, pv)
|
||||
if err != nil {
|
||||
return nil, pvc, err
|
||||
}
|
||||
return pv, pvc, nil
|
||||
}
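
// A minimal usage sketch (pvConfig and pvcConfig are assumed to come from the caller):
// create a pre-bound PVC/PV pair and make sure both are cleaned up.
//
//	pv, pvc, err := CreatePVCPV(c, pvConfig, pvcConfig, ns, true /* preBind */)
//	ExpectNoError(err)
//	defer PVPVCCleanup(c, ns, pv, pvc)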
|
||||
|
||||
// Create a PV followed by the PVC based on the passed in nfs-server ip and
|
||||
// namespace. If the "preBind" bool is true then pre-bind the PVC to the PV
|
||||
// via the PVC's VolumeName. Return the pv and pvc to reflect the created
|
||||
// objects.
|
||||
// Note: in the pre-bind case the real PV name, which is generated, is not
|
||||
// known until after the PV is instantiated. This is why the pv is created
|
||||
// before the pvc.
|
||||
func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
|
||||
preBindMsg := ""
|
||||
if preBind {
|
||||
preBindMsg = " pre-bound"
|
||||
}
|
||||
Logf("Creating a PV followed by a%s PVC", preBindMsg)
|
||||
|
||||
// make the pv and pvc definitions
|
||||
pv := MakePersistentVolume(pvConfig)
|
||||
pvc := MakePersistentVolumeClaim(pvcConfig, ns)
|
||||
|
||||
// instantiate the pv
|
||||
pv, err := createPV(c, pv)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// instantiate the pvc, handle pre-binding by VolumeName if needed
|
||||
if preBind {
|
||||
pvc.Spec.VolumeName = pv.Name
|
||||
}
|
||||
pvc, err = CreatePVC(c, ns, pvc)
|
||||
if err != nil {
|
||||
return pv, nil, err
|
||||
}
|
||||
return pv, pvc, nil
|
||||
}
|
||||
|
||||
// Create the desired number of PVs and PVCs and return them in separate maps. If the
|
||||
// number of PVs != the number of PVCs then the min of those two counts is the number of
|
||||
// PVs expected to bind. If a Create error occurs, the returned maps may contain pv and pvc
|
||||
// entries for the resources that were successfully created. In other words, when the caller
|
||||
// sees an error returned, it needs to decide what to do about entries in the maps.
|
||||
// Note: when the test suite deletes the namespace orphaned pvcs and pods are deleted. However,
|
||||
// orphaned pvs are not deleted and will remain after the suite completes.
|
||||
func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap, error) {
|
||||
pvMap := make(PVMap, numpvs)
|
||||
pvcMap := make(PVCMap, numpvcs)
|
||||
extraPVCs := 0
|
||||
extraPVs := numpvs - numpvcs
|
||||
if extraPVs < 0 {
|
||||
extraPVCs = -extraPVs
|
||||
extraPVs = 0
|
||||
}
|
||||
pvsToCreate := numpvs - extraPVs // want the min(numpvs, numpvcs)
|
||||
|
||||
// create pvs and pvcs
|
||||
for i := 0; i < pvsToCreate; i++ {
|
||||
pv, pvc, err := CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
|
||||
if err != nil {
|
||||
return pvMap, pvcMap, err
|
||||
}
|
||||
pvMap[pv.Name] = pvval{}
|
||||
pvcMap[makePvcKey(ns, pvc.Name)] = pvcval{}
|
||||
}
|
||||
|
||||
// create extra pvs or pvcs as needed
|
||||
for i := 0; i < extraPVs; i++ {
|
||||
pv := MakePersistentVolume(pvConfig)
|
||||
pv, err := createPV(c, pv)
|
||||
if err != nil {
|
||||
return pvMap, pvcMap, err
|
||||
}
|
||||
pvMap[pv.Name] = pvval{}
|
||||
}
|
||||
for i := 0; i < extraPVCs; i++ {
|
||||
pvc := MakePersistentVolumeClaim(pvcConfig, ns)
|
||||
pvc, err := CreatePVC(c, ns, pvc)
|
||||
if err != nil {
|
||||
return pvMap, pvcMap, err
|
||||
}
|
||||
pvcMap[makePvcKey(ns, pvc.Name)] = pvcval{}
|
||||
}
|
||||
return pvMap, pvcMap, nil
|
||||
}
|
||||
|
||||
// Wait for the pv and pvc to bind to each other.
|
||||
func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
|
||||
// Wait for newly created PVC to bind to the PV
|
||||
Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
|
||||
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)
|
||||
if err != nil {
|
||||
return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
|
||||
}
|
||||
|
||||
// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
|
||||
// since the PVC is already bound.
|
||||
err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
|
||||
if err != nil {
|
||||
return fmt.Errorf("PV %q did not become Bound: %v", pv.Name, err)
|
||||
}
|
||||
|
||||
// Re-get the pv and pvc objects
|
||||
pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("PV Get API error: %v", err)
|
||||
}
|
||||
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("PVC Get API error: %v", err)
|
||||
}
|
||||
|
||||
// The pv and pvc are both bound, but to each other?
|
||||
// Check that the PersistentVolume.ClaimRef matches the PVC
|
||||
if pv.Spec.ClaimRef == nil {
|
||||
return fmt.Errorf("PV %q ClaimRef is nil", pv.Name)
|
||||
}
|
||||
if pv.Spec.ClaimRef.Name != pvc.Name {
|
||||
return fmt.Errorf("PV %q ClaimRef's name (%q) should be %q", pv.Name, pv.Spec.ClaimRef.Name, pvc.Name)
|
||||
}
|
||||
if pvc.Spec.VolumeName != pv.Name {
|
||||
return fmt.Errorf("PVC %q VolumeName (%q) should be %q", pvc.Name, pvc.Spec.VolumeName, pv.Name)
|
||||
}
|
||||
if pv.Spec.ClaimRef.UID != pvc.UID {
|
||||
return fmt.Errorf("PV %q ClaimRef's UID (%q) should be %q", pv.Name, pv.Spec.ClaimRef.UID, pvc.UID)
|
||||
}
|
||||
return nil
|
||||
}
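
// A minimal usage sketch (assumed pvConfig/pvcConfig): the usual create-then-wait
// sequence used by single pv/pvc tests.
//
//	pv, pvc, err := CreatePVPVC(c, pvConfig, pvcConfig, ns, false /* preBind */)
//	ExpectNoError(err)
//	ExpectNoError(WaitOnPVandPVC(c, ns, pv, pvc))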
|
||||
|
||||
// Search for bound PVs and PVCs by examining pvols for non-nil claimRefs.
|
||||
// NOTE: Each iteration waits for a maximum of 3 minutes per PV and, if the PV is bound,
|
||||
// up to 3 minutes for the PVC. When the number of PVs != number of PVCs, this can lead
|
||||
// to situations where the maximum wait times are reached several times in succession,
|
||||
// extending test time. Thus, it is recommended to keep the delta between PVs and PVCs
|
||||
// small.
|
||||
func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PVCMap, testExpected bool) error {
|
||||
var actualBinds int
|
||||
expectedBinds := len(pvols)
|
||||
if expectedBinds > len(claims) { // want the min of # pvs or #pvcs
|
||||
expectedBinds = len(claims)
|
||||
}
|
||||
|
||||
for pvName := range pvols {
|
||||
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, 3*time.Second, 180*time.Second)
|
||||
if err != nil && len(pvols) > len(claims) {
|
||||
Logf("WARN: pv %v is not bound after max wait", pvName)
|
||||
Logf(" This may be ok since there are more pvs than pvcs")
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("PV %q did not become Bound: %v", pvName, err)
|
||||
}
|
||||
|
||||
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("PV Get API error: %v", err)
|
||||
}
|
||||
cr := pv.Spec.ClaimRef
|
||||
if cr != nil && len(cr.Name) > 0 {
|
||||
// Assert bound pvc is a test resource. Failing assertion could
|
||||
// indicate non-test PVC interference or a bug in the test
|
||||
pvcKey := makePvcKey(ns, cr.Name)
|
||||
if _, found := claims[pvcKey]; !found {
|
||||
return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey)
|
||||
}
|
||||
|
||||
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, 3*time.Second, 180*time.Second)
|
||||
if err != nil {
|
||||
return fmt.Errorf("PVC %q did not become Bound: %v", cr.Name, err)
|
||||
}
|
||||
actualBinds++
|
||||
}
|
||||
}
|
||||
|
||||
if testExpected && actualBinds != expectedBinds {
|
||||
return fmt.Errorf("expect number of bound PVs (%v) to equal number of claims (%v)", actualBinds, expectedBinds)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Tests that the pod terminates with exit code 0 (success).
|
||||
func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
|
||||
By("Pod should terminate with exitcode 0 (success)")
|
||||
if err := WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
|
||||
return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err)
|
||||
}
|
||||
Logf("Pod %v succeeded ", pod.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes the passed-in pod and waits for the pod to be terminated. Resilient to the pod
|
||||
// not existing.
|
||||
func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) error {
|
||||
const maxWait = 5 * time.Minute
|
||||
if pod == nil {
|
||||
return nil
|
||||
}
|
||||
Logf("Deleting pod %q in namespace %q", pod.Name, pod.Namespace)
|
||||
err := c.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil)
|
||||
if err != nil {
|
||||
if apierrs.IsNotFound(err) {
|
||||
return nil // assume pod was already deleted
|
||||
}
|
||||
return fmt.Errorf("pod Delete API error: %v", err)
|
||||
}
|
||||
Logf("Wait up to %v for pod %q to be fully deleted", maxWait, pod.Name)
|
||||
err = f.WaitForPodNotFound(pod.Name, maxWait)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pod %q was not deleted: %v", pod.Name, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create the test pod, wait for (hopefully) success, and then delete the pod.
|
||||
// Note: need named return value so that the err assignment in the defer sets the returned error.
|
||||
// Has been shown to be necessary using Go 1.7.
|
||||
func CreateWaitAndDeletePod(f *Framework, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (err error) {
|
||||
Logf("Creating nfs test pod")
|
||||
pod := MakeWritePod(ns, pvc)
|
||||
runPod, err := c.CoreV1().Pods(ns).Create(pod)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pod Create API error: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
delErr := DeletePodWithWait(f, c, runPod)
|
||||
if err == nil { // don't override previous err value
|
||||
err = delErr // assign to returned err, can be nil
|
||||
}
|
||||
}()
|
||||
|
||||
err = testPodSuccessOrFail(c, ns, runPod)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pod %q did not exit with Success: %v", runPod.Name, err)
|
||||
}
|
||||
return // note: named return value
|
||||
}
|
||||
|
||||
// Sanity check for GCE testing. Verify that the persistent disk is attached to the node.
|
||||
func VerifyGCEDiskAttached(diskName string, nodeName types.NodeName) (bool, error) {
|
||||
gceCloud, err := GetGCECloud()
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("GetGCECloud error: %v", err)
|
||||
}
|
||||
isAttached, err := gceCloud.DiskIsAttached(diskName, nodeName)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("cannot verify if GCE disk is attached: %v", err)
|
||||
}
|
||||
return isAttached, nil
|
||||
}
|
||||
|
||||
// Return a NamespacedName suitable for use as a PVC map key.
|
||||
func makePvcKey(ns, name string) types.NamespacedName {
|
||||
return types.NamespacedName{Namespace: ns, Name: name}
|
||||
}
|
||||
|
||||
// Returns a PV definition based on the nfs server IP. If the PVC is not nil
|
||||
// then the PV is defined with a ClaimRef which includes the PVC's namespace.
|
||||
// If the PVC is nil then the PV is not defined with a ClaimRef. If no reclaimPolicy
|
||||
// is assigned, assumes "Retain". Specs are expected to match the test's PVC.
|
||||
// Note: the passed-in claim does not have a name until it is created and thus the PV's
|
||||
// ClaimRef cannot be completely filled-in in this func. Therefore, the ClaimRef's name
|
||||
// is added later in CreatePVCPV.
|
||||
func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume {
|
||||
var claimRef *v1.ObjectReference
|
||||
// If the reclaimPolicy is not provided, assume Retain
|
||||
if pvConfig.ReclaimPolicy == "" {
|
||||
Logf("PV ReclaimPolicy unspecified, default: Retain")
|
||||
pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRetain
|
||||
}
|
||||
if pvConfig.Prebind != nil {
|
||||
claimRef = &v1.ObjectReference{
|
||||
Name: pvConfig.Prebind.Name,
|
||||
Namespace: pvConfig.Prebind.Namespace,
|
||||
}
|
||||
}
|
||||
pv := &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: pvConfig.NamePrefix,
|
||||
Labels: pvConfig.Labels,
|
||||
Annotations: map[string]string{
|
||||
volumehelper.VolumeGidAnnotationKey: "777",
|
||||
},
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeReclaimPolicy: pvConfig.ReclaimPolicy,
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
|
||||
},
|
||||
PersistentVolumeSource: pvConfig.PVSource,
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
v1.ReadOnlyMany,
|
||||
v1.ReadWriteMany,
|
||||
},
|
||||
ClaimRef: claimRef,
|
||||
StorageClassName: pvConfig.StorageClassName,
|
||||
},
|
||||
}
|
||||
err := helper.StorageNodeAffinityToAlphaAnnotation(pv.Annotations, pvConfig.NodeAffinity)
|
||||
if err != nil {
|
||||
Logf("Setting storage node affinity failed: %v", err)
|
||||
return nil
|
||||
}
|
||||
return pv
|
||||
}
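
// A minimal sketch (the NFS server and path are placeholders): build a PV definition
// directly and create it through the client.
//
//	pv := MakePersistentVolume(PersistentVolumeConfig{
//		NamePrefix:    "pv-",
//		ReclaimPolicy: v1.PersistentVolumeReclaimRecycle,
//		PVSource: v1.PersistentVolumeSource{
//			NFS: &v1.NFSVolumeSource{Server: "10.0.0.2", Path: "/exports"},
//		},
//	})
//	pv, err := c.CoreV1().PersistentVolumes().Create(pv)
//	ExpectNoError(err)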
|
||||
|
||||
// Returns a PVC definition based on the namespace.
|
||||
// Note: if this PVC is intended to be pre-bound to a PV, whose name is not
|
||||
// known until the PV is instantiated, then the func CreatePVPVC will add
|
||||
// pvc.Spec.VolumeName to this claim.
|
||||
func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.PersistentVolumeClaim {
|
||||
// Specs are expected to match this test's PersistentVolume
|
||||
|
||||
if len(cfg.AccessModes) == 0 {
|
||||
Logf("AccessModes unspecified, default: all modes (RWO, RWX, ROX).")
|
||||
cfg.AccessModes = append(cfg.AccessModes, v1.ReadWriteOnce, v1.ReadWriteMany, v1.ReadOnlyMany)
|
||||
}
|
||||
|
||||
return &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "pvc-",
|
||||
Namespace: ns,
|
||||
Annotations: cfg.Annotations,
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
Selector: cfg.Selector,
|
||||
AccessModes: cfg.AccessModes,
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
|
||||
},
|
||||
},
|
||||
StorageClassName: cfg.StorageClassName,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createPDWithRetry(zone string) (string, error) {
|
||||
var err error
|
||||
for start := time.Now(); time.Since(start) < PDRetryTimeout; time.Sleep(PDRetryPollTime) {
|
||||
newDiskName, err := createPD(zone)
|
||||
if err != nil {
|
||||
Logf("Couldn't create a new PD, sleeping 5 seconds: %v", err)
|
||||
continue
|
||||
}
|
||||
Logf("Successfully created a new PD: %q.", newDiskName)
|
||||
return newDiskName, nil
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
func CreatePDWithRetry() (string, error) {
|
||||
return createPDWithRetry("")
|
||||
}
|
||||
|
||||
func CreatePDWithRetryAndZone(zone string) (string, error) {
|
||||
return createPDWithRetry(zone)
|
||||
}
|
||||
|
||||
func DeletePDWithRetry(diskName string) error {
|
||||
var err error
|
||||
for start := time.Now(); time.Since(start) < PDRetryTimeout; time.Sleep(PDRetryPollTime) {
|
||||
err = deletePD(diskName)
|
||||
if err != nil {
|
||||
Logf("Couldn't delete PD %q, sleeping %v: %v", diskName, PDRetryPollTime, err)
|
||||
continue
|
||||
}
|
||||
Logf("Successfully deleted PD %q.", diskName)
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unable to delete PD %q: %v", diskName, err)
|
||||
}
|
||||
|
||||
func createPD(zone string) (string, error) {
|
||||
if zone == "" {
|
||||
zone = TestContext.CloudConfig.Zone
|
||||
}
|
||||
|
||||
if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
|
||||
pdName := fmt.Sprintf("%s-%s", TestContext.Prefix, string(uuid.NewUUID()))
|
||||
|
||||
gceCloud, err := GetGCECloud()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
tags := map[string]string{}
|
||||
err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeSSD, zone, 10 /* sizeGb */, tags)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return pdName, nil
|
||||
} else if TestContext.Provider == "aws" {
|
||||
client := ec2.New(session.New())
|
||||
|
||||
request := &ec2.CreateVolumeInput{}
|
||||
request.AvailabilityZone = aws.String(zone)
|
||||
request.Size = aws.Int64(10)
|
||||
request.VolumeType = aws.String(awscloud.DefaultVolumeType)
|
||||
response, err := client.CreateVolume(request)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
az := aws.StringValue(response.AvailabilityZone)
|
||||
awsID := aws.StringValue(response.VolumeId)
|
||||
|
||||
volumeName := "aws://" + az + "/" + awsID
|
||||
return volumeName, nil
|
||||
} else if TestContext.Provider == "azure" {
|
||||
pdName := fmt.Sprintf("%s-%s", TestContext.Prefix, string(uuid.NewUUID()))
|
||||
azureCloud, err := GetAzureCloud()
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
_, diskURI, _, err := azureCloud.CreateVolume(pdName, "" /* account */, "" /* sku */, "" /* location */, 1 /* sizeGb */)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return diskURI, nil
|
||||
} else {
|
||||
return "", fmt.Errorf("provider does not support volume creation")
|
||||
}
|
||||
}
|
||||
|
||||
func deletePD(pdName string) error {
|
||||
if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
|
||||
gceCloud, err := GetGCECloud()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = gceCloud.DeleteDisk(pdName)
|
||||
|
||||
if err != nil {
|
||||
if gerr, ok := err.(*googleapi.Error); ok && len(gerr.Errors) > 0 && gerr.Errors[0].Reason == "notFound" {
|
||||
// PD was already deleted, ignore the error.
|
||||
return nil
|
||||
}
|
||||
|
||||
Logf("error deleting PD %q: %v", pdName, err)
|
||||
}
|
||||
return err
|
||||
} else if TestContext.Provider == "aws" {
|
||||
client := ec2.New(session.New())
|
||||
|
||||
tokens := strings.Split(pdName, "/")
|
||||
awsVolumeID := tokens[len(tokens)-1]
|
||||
|
||||
request := &ec2.DeleteVolumeInput{VolumeId: aws.String(awsVolumeID)}
|
||||
_, err := client.DeleteVolume(request)
|
||||
if err != nil {
|
||||
if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" {
|
||||
Logf("volume deletion implicitly succeeded because volume %q does not exist.", pdName)
|
||||
} else {
|
||||
return fmt.Errorf("error deleting EBS volumes: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
} else if TestContext.Provider == "azure" {
|
||||
azureCloud, err := GetAzureCloud()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = azureCloud.DeleteVolume(pdName)
|
||||
if err != nil {
|
||||
Logf("failed to delete Azure volume %q: %v", pdName, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
} else {
|
||||
return fmt.Errorf("provider does not support volume deletion")
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a pod definition based on the namespace. The pod references the PVC's
|
||||
// name.
|
||||
func MakeWritePod(ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
|
||||
return MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
|
||||
}
|
||||
|
||||
// Returns a pod definition based on the namespace. The pod references the PVC's
|
||||
// name. A shell command string can be supplied; it is run via "/bin/sh -c" by the pod's container.
|
||||
func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod {
|
||||
if len(command) == 0 {
|
||||
command = "while true; do sleep 1; done"
|
||||
}
|
||||
podSpec := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "pvc-tester-",
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "write-pod",
|
||||
Image: BusyBoxImage,
|
||||
Command: []string{"/bin/sh"},
|
||||
Args: []string{"-c", command},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: &isPrivileged,
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyOnFailure,
|
||||
},
|
||||
}
|
||||
var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
|
||||
var volumes = make([]v1.Volume, len(pvclaims))
|
||||
for index, pvclaim := range pvclaims {
|
||||
volumename := fmt.Sprintf("volume%v", index+1)
|
||||
volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
|
||||
volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
|
||||
}
|
||||
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
|
||||
podSpec.Spec.Volumes = volumes
|
||||
if nodeSelector != nil {
|
||||
podSpec.Spec.NodeSelector = nodeSelector
|
||||
}
|
||||
return podSpec
|
||||
}
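
// A minimal usage sketch (pvc, ns, c and f are assumed caller-side values): run a pod
// that writes to the first mounted claim, then delete it.
//
//	pod := MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/foo")
//	pod, err := c.CoreV1().Pods(ns).Create(pod)
//	ExpectNoError(err)
//	defer DeletePodWithWait(f, c, pod)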
|
||||
|
||||
// Returns a pod definition based on the namespace. The pod references the PVC's
|
||||
// name. A shell command string can be supplied; it is run via "/bin/sh -c" by the pod's container.
// SELinux testing requires passing HostIPC and HostPID as boolean arguments.
|
||||
func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions) *v1.Pod {
|
||||
if len(command) == 0 {
|
||||
command = "while true; do sleep 1; done"
|
||||
}
|
||||
podName := "security-context-" + string(uuid.NewUUID())
|
||||
fsGroup := int64(1000)
|
||||
podSpec := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
HostIPC: hostIPC,
|
||||
HostPID: hostPID,
|
||||
SecurityContext: &v1.PodSecurityContext{
|
||||
FSGroup: &fsGroup,
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "write-pod",
|
||||
Image: "gcr.io/google_containers/busybox:1.24",
|
||||
Command: []string{"/bin/sh"},
|
||||
Args: []string{"-c", command},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: &isPrivileged,
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyOnFailure,
|
||||
},
|
||||
}
|
||||
var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
|
||||
var volumes = make([]v1.Volume, len(pvclaims))
|
||||
for index, pvclaim := range pvclaims {
|
||||
volumename := fmt.Sprintf("volume%v", index+1)
|
||||
volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
|
||||
volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
|
||||
}
|
||||
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
|
||||
podSpec.Spec.Volumes = volumes
|
||||
podSpec.Spec.SecurityContext.SELinuxOptions = seLinuxLabel
|
||||
return podSpec
|
||||
}
|
||||
|
||||
// CreatePod with given claims based on node selector
|
||||
func CreatePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
|
||||
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
|
||||
pod, err := client.CoreV1().Pods(namespace).Create(pod)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("pod Create API error: %v", err)
|
||||
}
|
||||
// Waiting for pod to be running
|
||||
err = WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
|
||||
if err != nil {
|
||||
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
|
||||
}
|
||||
// get fresh pod info
|
||||
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return pod, fmt.Errorf("pod Get API error: %v", err)
|
||||
}
|
||||
return pod, nil
|
||||
}
|
||||
|
||||
// create security pod with given claims
|
||||
func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions) (*v1.Pod, error) {
|
||||
pod := MakeSecPod(namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel)
|
||||
pod, err := client.CoreV1().Pods(namespace).Create(pod)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("pod Create API error: %v", err)
|
||||
}
|
||||
// Waiting for pod to be running
|
||||
err = WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
|
||||
if err != nil {
|
||||
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
|
||||
}
|
||||
// get fresh pod info
|
||||
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return pod, fmt.Errorf("pod Get API error: %v", err)
|
||||
}
|
||||
return pod, nil
|
||||
}
|
||||
|
||||
// Define and create a pod with a mounted PV. Pod runs infinite loop until killed.
|
||||
func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
|
||||
return CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
|
||||
}
|
||||
|
||||
// wait until all pvcs phase set to bound
|
||||
func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim, timeout time.Duration) ([]*v1.PersistentVolume, error) {
|
||||
persistentvolumes := make([]*v1.PersistentVolume, len(pvclaims))
|
||||
|
||||
for index, claim := range pvclaims {
|
||||
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, Poll, timeout)
|
||||
if err != nil {
|
||||
return persistentvolumes, err
|
||||
}
|
||||
// Get new copy of the claim
|
||||
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return persistentvolumes, fmt.Errorf("PVC Get API error: %v", err)
|
||||
}
|
||||
// Get the bound PV
|
||||
persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return persistentvolumes, fmt.Errorf("PV Get API error: %v", err)
|
||||
}
|
||||
}
|
||||
return persistentvolumes, nil
|
||||
}
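
// A minimal usage sketch (pvclaims and the timeout are assumed caller-side values): wait
// for a batch of claims to bind and then inspect the PVs they bound to.
//
//	pvs, err := WaitForPVClaimBoundPhase(client, pvclaims, 5*time.Minute)
//	ExpectNoError(err)
//	for _, pv := range pvs {
//		Logf("claim bound to PV %q with capacity %v", pv.Name, pv.Spec.Capacity)
//	}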
|
||||
|
||||
func CreatePVSource(zone string) (*v1.PersistentVolumeSource, error) {
|
||||
diskName, err := CreatePDWithRetryAndZone(zone)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
|
||||
return &v1.PersistentVolumeSource{
|
||||
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
|
||||
PDName: diskName,
|
||||
FSType: "ext3",
|
||||
ReadOnly: false,
|
||||
},
|
||||
}, nil
|
||||
} else if TestContext.Provider == "aws" {
|
||||
return &v1.PersistentVolumeSource{
|
||||
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
|
||||
VolumeID: diskName,
|
||||
FSType: "ext3",
|
||||
},
|
||||
}, nil
|
||||
} else {
|
||||
return nil, fmt.Errorf("Provider not supported")
|
||||
}
|
||||
}
|
||||
|
||||
func DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
|
||||
if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
|
||||
return DeletePDWithRetry(pvSource.GCEPersistentDisk.PDName)
|
||||
} else if TestContext.Provider == "aws" {
|
||||
return DeletePDWithRetry(pvSource.AWSElasticBlockStore.VolumeID)
|
||||
} else {
|
||||
return fmt.Errorf("Provider not supported")
|
||||
}
|
||||
}
|
287
vendor/k8s.io/kubernetes/test/e2e/framework/rc_util.go
generated
vendored
Normal file
@ -0,0 +1,287 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
// RcByNamePort returns a ReplicationController with specified name and port
|
||||
func RcByNamePort(name string, replicas int32, image string, port int, protocol v1.Protocol,
|
||||
labels map[string]string, gracePeriod *int64) *v1.ReplicationController {
|
||||
|
||||
return RcByNameContainer(name, replicas, image, labels, v1.Container{
|
||||
Name: name,
|
||||
Image: image,
|
||||
Ports: []v1.ContainerPort{{ContainerPort: int32(port), Protocol: protocol}},
|
||||
}, gracePeriod)
|
||||
}
|
||||
|
||||
// RcByNameContainer returns a ReplicationController with the specified name and container
|
||||
func RcByNameContainer(name string, replicas int32, image string, labels map[string]string, c v1.Container,
|
||||
gracePeriod *int64) *v1.ReplicationController {
|
||||
|
||||
zeroGracePeriod := int64(0)
|
||||
|
||||
// Add "name": name to the labels, overwriting if it exists.
|
||||
labels["name"] = name
|
||||
if gracePeriod == nil {
|
||||
gracePeriod = &zeroGracePeriod
|
||||
}
|
||||
return &v1.ReplicationController{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ReplicationController",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Replicas: func(i int32) *int32 { return &i }(replicas),
|
||||
Selector: map[string]string{
|
||||
"name": name,
|
||||
},
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{c},
|
||||
TerminationGracePeriodSeconds: gracePeriod,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
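
// A minimal usage sketch (name, image and replica count are placeholders): define a
// simple RC exposing one port and create it through the client.
//
//	rc := RcByNamePort("nginx-rc", 2, "nginx", 80, v1.ProtocolTCP, map[string]string{}, nil)
//	rc, err := c.CoreV1().ReplicationControllers(ns).Create(rc)
//	ExpectNoError(err)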
|
||||
|
||||
// ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till
|
||||
// none are running, otherwise it does what a synchronous scale operation would do.
|
||||
func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, ns string, l map[string]string, replicas uint) error {
|
||||
listOpts := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()}
|
||||
rcs, err := clientset.CoreV1().ReplicationControllers(ns).List(listOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(rcs.Items) == 0 {
|
||||
return fmt.Errorf("RC with labels %v not found in ns %v", l, ns)
|
||||
}
|
||||
Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
|
||||
for _, labelRC := range rcs.Items {
|
||||
name := labelRC.Name
|
||||
if err := ScaleRC(clientset, internalClientset, ns, name, replicas, false); err != nil {
|
||||
return err
|
||||
}
|
||||
rc, err := clientset.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if replicas == 0 {
|
||||
ps, err := podStoreForSelector(clientset, rc.Namespace, labels.SelectorFromSet(rc.Spec.Selector))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer ps.Stop()
|
||||
if err = waitForPodsGone(ps, 10*time.Second, 10*time.Minute); err != nil {
|
||||
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
|
||||
}
|
||||
} else {
|
||||
if err := testutils.WaitForPodsWithLabelRunning(
|
||||
clientset, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector))); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type updateRcFunc func(d *v1.ReplicationController)
|
||||
|
||||
func UpdateReplicationControllerWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateRcFunc) (*v1.ReplicationController, error) {
|
||||
var rc *v1.ReplicationController
|
||||
var updateErr error
|
||||
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
|
||||
var err error
|
||||
if rc, err = c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Apply the update, then attempt to push it to the apiserver.
|
||||
applyUpdate(rc)
|
||||
if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(rc); err == nil {
|
||||
Logf("Updating replication controller %q", name)
|
||||
return true, nil
|
||||
}
|
||||
updateErr = err
|
||||
return false, nil
|
||||
})
|
||||
if pollErr == wait.ErrWaitTimeout {
|
||||
pollErr = fmt.Errorf("couldn't apply the provided updated to rc %q: %v", name, updateErr)
|
||||
}
|
||||
return rc, pollErr
|
||||
}
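
// A minimal usage sketch (client, namespace and RC name are placeholders): bump an RC's
// replica count with the conflict-retrying helper above.
//
//	_, err := UpdateReplicationControllerWithRetries(c, ns, "nginx-rc",
//		func(rc *v1.ReplicationController) {
//			replicas := int32(3)
//			rc.Spec.Replicas = &replicas
//		})
//	ExpectNoError(err)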
|
||||
|
||||
// DeleteRCAndWaitForGC deletes only the Replication Controller and waits for GC to delete the pods.
|
||||
func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error {
|
||||
return DeleteResourceAndWaitForGC(c, api.Kind("ReplicationController"), ns, name)
|
||||
}
|
||||
|
||||
func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
|
||||
return DeleteResourceAndPods(clientset, internalClientset, api.Kind("ReplicationController"), ns, name)
|
||||
}
|
||||
|
||||
func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
|
||||
return ScaleResource(clientset, internalClientset, ns, name, size, wait, api.Kind("ReplicationController"))
|
||||
}
|
||||
|
||||
func RunRC(config testutils.RCConfig) error {
|
||||
By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
|
||||
config.NodeDumpFunc = DumpNodeDebugInfo
|
||||
config.ContainerDumpFunc = LogFailedContainers
|
||||
return testutils.RunRC(config)
|
||||
}
|
||||
|
||||
// WaitForRCPodToDisappear returns nil if the pod from the given replication controller (described by rcName) no longer exists.
|
||||
// In case of failure or too long waiting time, an error is returned.
|
||||
func WaitForRCPodToDisappear(c clientset.Interface, ns, rcName, podName string) error {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
|
||||
// NodeController evicts pod after 5 minutes, so we need timeout greater than that to observe effects.
|
||||
// The grace period must be set to 0 on the pod for it to be deleted during the partition.
|
||||
// Otherwise, it goes to the 'Terminating' state till the kubelet confirms deletion.
|
||||
return WaitForPodToDisappear(c, ns, podName, label, 20*time.Second, 10*time.Minute)
|
||||
}
|
||||
|
||||
// WaitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false)
|
||||
func WaitForReplicationController(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
|
||||
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
_, err := c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
|
||||
return !exist, nil
|
||||
} else {
|
||||
Logf("ReplicationController %s in namespace %s found.", name, namespace)
|
||||
return exist, nil
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
|
||||
return fmt.Errorf("error waiting for ReplicationController %s/%s %s: %v", namespace, name, stateMsg[exist], err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WaitForReplicationControllerwithSelector waits until any RC with given selector appears (exist == true), or disappears (exist == false)
|
||||
func WaitForReplicationControllerwithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval,
|
||||
timeout time.Duration) error {
|
||||
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
rcs, err := c.CoreV1().ReplicationControllers(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
|
||||
switch {
|
||||
case len(rcs.Items) != 0:
|
||||
Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace)
|
||||
return exist, nil
|
||||
case len(rcs.Items) == 0:
|
||||
Logf("ReplicationController with %s in namespace %s disappeared.", selector.String(), namespace)
|
||||
return !exist, nil
|
||||
default:
|
||||
Logf("List ReplicationController with %s in namespace %s failed: %v", selector.String(), namespace, err)
|
||||
return false, nil
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
|
||||
return fmt.Errorf("error waiting for ReplicationControllers with %s in namespace %s %s: %v", selector.String(), namespace, stateMsg[exist], err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// validatorFn is the function which individual tests will implement.
|
||||
// we may want it to return more than just an error, at some point.
|
||||
type validatorFn func(c clientset.Interface, podID string) error
|
||||
|
||||
// ValidateController is a generic mechanism for testing RC's that are running.
|
||||
// It takes a container name, a test name, and a validator function which is plugged in by a specific test.
|
||||
// "containername": this is grepped for.
|
||||
// "containerImage" : this is the name of the image we expect to be launched. Not to confuse w/ images (kitten.jpg) which are validated.
|
||||
// "testname": which gets bubbled up to the logging/failure messages if errors happen.
|
||||
// "validator" function: This function is given a podID and a client, and it can do some specific validations that way.
|
||||
func ValidateController(c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
|
||||
getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"
|
||||
// NB: kubectl adds the "exists" function to the standard template functions.
|
||||
// This lets us check to see if the "running" entry exists for each of the containers
|
||||
// we care about. Exists will never return an error and it's safe to check a chain of
|
||||
// things, any one of which may not exist. In the below template, all of info,
|
||||
// containername, and running might be nil, so the normal index function isn't very
|
||||
// helpful.
|
||||
// This template is unit-tested in kubectl, so if you change it, update the unit test.
|
||||
// You can read about the syntax here: http://golang.org/pkg/text/template/.
|
||||
getContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "%s") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}`, containername)
|
||||
|
||||
getImageTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if eq .name "%s"}}{{.image}}{{end}}{{end}}{{end}}`, containername)
|
||||
|
||||
By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
|
||||
waitLoop:
|
||||
for start := time.Now(); time.Since(start) < PodStartTimeout; time.Sleep(5 * time.Second) {
|
||||
getPodsOutput := RunKubectlOrDie("get", "pods", "-o", "template", getPodsTemplate, "-l", testname, fmt.Sprintf("--namespace=%v", ns))
|
||||
pods := strings.Fields(getPodsOutput)
|
||||
if numPods := len(pods); numPods != replicas {
|
||||
By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods))
|
||||
continue
|
||||
}
|
||||
var runningPods []string
|
||||
for _, podID := range pods {
|
||||
running := RunKubectlOrDie("get", "pods", podID, "-o", "template", getContainerStateTemplate, fmt.Sprintf("--namespace=%v", ns))
|
||||
if running != "true" {
|
||||
Logf("%s is created but not running", podID)
|
||||
continue waitLoop
|
||||
}
|
||||
|
||||
currentImage := RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, fmt.Sprintf("--namespace=%v", ns))
|
||||
if currentImage != containerImage {
|
||||
Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
|
||||
continue waitLoop
|
||||
}
|
||||
|
||||
// Call the generic validator function here.
|
||||
// This might validate for example, that (1) getting a url works and (2) url is serving correct content.
|
||||
if err := validator(c, podID); err != nil {
|
||||
Logf("%s is running right image but validator function failed: %v", podID, err)
|
||||
continue waitLoop
|
||||
}
|
||||
|
||||
Logf("%s is verified up and running", podID)
|
||||
runningPods = append(runningPods, podID)
|
||||
}
|
||||
// If we reach here, then all our checks passed.
|
||||
if len(runningPods) == replicas {
|
||||
return
|
||||
}
|
||||
}
|
||||
// Reaching here means that one or more checks failed multiple times. Assuming it's not a race condition, something is broken.
|
||||
Failf("Timed out after %v seconds waiting for %s pods to reach valid state", PodStartTimeout.Seconds(), testname)
|
||||
}
|
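// Illustrative usage of ValidateController (a minimal sketch; the image, replica
// count and label value are assumptions for the example). Note that testname is
// also passed as the label selector for the pods being validated:
//
//	validator := func(c clientset.Interface, podID string) error {
//		// A test-specific check, e.g. hit an endpoint served by the pod and
//		// verify the response body.
//		return nil
//	}
//	ValidateController(c, "gcr.io/google-samples/gb-frontend:v4", 3, "frontend", "frontend", validator, ns)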
379
vendor/k8s.io/kubernetes/test/e2e/framework/resource_usage_gatherer.go
generated
vendored
Normal file
379
vendor/k8s.io/kubernetes/test/e2e/framework/resource_usage_gatherer.go
generated
vendored
Normal file
@ -0,0 +1,379 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/util/system"
|
||||
)
|
||||
|
||||
type ResourceConstraint struct {
|
||||
CPUConstraint float64
|
||||
MemoryConstraint uint64
|
||||
}
|
||||
|
||||
type SingleContainerSummary struct {
|
||||
Name string
|
||||
Cpu float64
|
||||
Mem uint64
|
||||
}
|
||||
|
||||
// we can't have int here, as JSON does not accept integer keys.
|
||||
type ResourceUsageSummary map[string][]SingleContainerSummary
|
||||
|
||||
func (s *ResourceUsageSummary) PrintHumanReadable() string {
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
|
||||
for perc, summaries := range *s {
|
||||
buf.WriteString(fmt.Sprintf("%v percentile:\n", perc))
|
||||
fmt.Fprintf(w, "container\tcpu(cores)\tmemory(MB)\n")
|
||||
for _, summary := range summaries {
|
||||
fmt.Fprintf(w, "%q\t%.3f\t%.2f\n", summary.Name, summary.Cpu, float64(summary.Mem)/(1024*1024))
|
||||
}
|
||||
w.Flush()
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (s *ResourceUsageSummary) PrintJSON() string {
|
||||
return PrettyPrintJSON(*s)
|
||||
}
|
||||
|
||||
func (s *ResourceUsageSummary) SummaryKind() string {
|
||||
return "ResourceUsageSummary"
|
||||
}
|
||||
|
||||
func computePercentiles(timeSeries []ResourceUsagePerContainer, percentilesToCompute []int) map[int]ResourceUsagePerContainer {
|
||||
if len(timeSeries) == 0 {
|
||||
return make(map[int]ResourceUsagePerContainer)
|
||||
}
|
||||
dataMap := make(map[string]*usageDataPerContainer)
|
||||
for i := range timeSeries {
|
||||
for name, data := range timeSeries[i] {
|
||||
if dataMap[name] == nil {
|
||||
dataMap[name] = &usageDataPerContainer{
|
||||
cpuData: make([]float64, 0, len(timeSeries)),
|
||||
memUseData: make([]uint64, 0, len(timeSeries)),
|
||||
memWorkSetData: make([]uint64, 0, len(timeSeries)),
|
||||
}
|
||||
}
|
||||
dataMap[name].cpuData = append(dataMap[name].cpuData, data.CPUUsageInCores)
|
||||
dataMap[name].memUseData = append(dataMap[name].memUseData, data.MemoryUsageInBytes)
|
||||
dataMap[name].memWorkSetData = append(dataMap[name].memWorkSetData, data.MemoryWorkingSetInBytes)
|
||||
}
|
||||
}
|
||||
for _, v := range dataMap {
|
||||
sort.Float64s(v.cpuData)
|
||||
sort.Sort(uint64arr(v.memUseData))
|
||||
sort.Sort(uint64arr(v.memWorkSetData))
|
||||
}
|
||||
|
||||
result := make(map[int]ResourceUsagePerContainer)
|
||||
for _, perc := range percentilesToCompute {
|
||||
data := make(ResourceUsagePerContainer)
|
||||
for k, v := range dataMap {
|
||||
percentileIndex := int(math.Ceil(float64(len(v.cpuData)*perc)/100)) - 1
|
||||
data[k] = &ContainerResourceUsage{
|
||||
Name: k,
|
||||
CPUUsageInCores: v.cpuData[percentileIndex],
|
||||
MemoryUsageInBytes: v.memUseData[percentileIndex],
|
||||
MemoryWorkingSetInBytes: v.memWorkSetData[percentileIndex],
|
||||
}
|
||||
}
|
||||
result[perc] = data
|
||||
}
|
||||
return result
|
||||
}
|
||||
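// Worked example for the percentile index above (an illustrative note, not part
// of the original file): with 10 samples per container, perc == 90 gives
// percentileIndex = ceil(10*90/100) - 1 = 8, i.e. the 9th smallest sample, and
// perc == 99 gives ceil(10*99/100) - 1 = 9, i.e. the largest sample.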
|
||||
func leftMergeData(left, right map[int]ResourceUsagePerContainer) map[int]ResourceUsagePerContainer {
|
||||
result := make(map[int]ResourceUsagePerContainer)
|
||||
for percentile, data := range left {
|
||||
result[percentile] = data
|
||||
if _, ok := right[percentile]; !ok {
|
||||
continue
|
||||
}
|
||||
for k, v := range right[percentile] {
|
||||
result[percentile][k] = v
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
type resourceGatherWorker struct {
|
||||
c clientset.Interface
|
||||
nodeName string
|
||||
wg *sync.WaitGroup
|
||||
containerIDs []string
|
||||
stopCh chan struct{}
|
||||
dataSeries []ResourceUsagePerContainer
|
||||
finished bool
|
||||
inKubemark bool
|
||||
resourceDataGatheringPeriod time.Duration
|
||||
probeDuration time.Duration
|
||||
printVerboseLogs bool
|
||||
}
|
||||
|
||||
func (w *resourceGatherWorker) singleProbe() {
|
||||
data := make(ResourceUsagePerContainer)
|
||||
if w.inKubemark {
|
||||
kubemarkData := GetKubemarkMasterComponentsResourceUsage()
|
||||
if kubemarkData == nil {
|
||||
return
|
||||
}
|
||||
for k, v := range kubemarkData {
|
||||
data[k] = &ContainerResourceUsage{
|
||||
Name: v.Name,
|
||||
MemoryWorkingSetInBytes: v.MemoryWorkingSetInBytes,
|
||||
CPUUsageInCores: v.CPUUsageInCores,
|
||||
}
|
||||
}
|
||||
} else {
|
||||
nodeUsage, err := getOneTimeResourceUsageOnNode(w.c, w.nodeName, w.probeDuration, func() []string { return w.containerIDs })
|
||||
if err != nil {
|
||||
Logf("Error while reading data from %v: %v", w.nodeName, err)
|
||||
return
|
||||
}
|
||||
for k, v := range nodeUsage {
|
||||
data[k] = v
|
||||
if w.printVerboseLogs {
|
||||
Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
w.dataSeries = append(w.dataSeries, data)
|
||||
}
|
||||
|
||||
func (w *resourceGatherWorker) gather(initialSleep time.Duration) {
|
||||
defer utilruntime.HandleCrash()
|
||||
defer w.wg.Done()
|
||||
defer Logf("Closing worker for %v", w.nodeName)
|
||||
defer func() { w.finished = true }()
|
||||
select {
|
||||
case <-time.After(initialSleep):
|
||||
w.singleProbe()
|
||||
for {
|
||||
select {
|
||||
case <-time.After(w.resourceDataGatheringPeriod):
|
||||
w.singleProbe()
|
||||
case <-w.stopCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
case <-w.stopCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
type containerResourceGatherer struct {
|
||||
client clientset.Interface
|
||||
stopCh chan struct{}
|
||||
workers []resourceGatherWorker
|
||||
workerWg sync.WaitGroup
|
||||
containerIDs []string
|
||||
options ResourceGathererOptions
|
||||
}
|
||||
|
||||
type ResourceGathererOptions struct {
|
||||
InKubemark bool
|
||||
MasterOnly bool
|
||||
ResourceDataGatheringPeriod time.Duration
|
||||
ProbeDuration time.Duration
|
||||
PrintVerboseLogs bool
|
||||
}
|
||||
|
||||
func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*containerResourceGatherer, error) {
|
||||
g := containerResourceGatherer{
|
||||
client: c,
|
||||
stopCh: make(chan struct{}),
|
||||
containerIDs: make([]string, 0),
|
||||
options: options,
|
||||
}
|
||||
|
||||
if options.InKubemark {
|
||||
g.workerWg.Add(1)
|
||||
g.workers = append(g.workers, resourceGatherWorker{
|
||||
inKubemark: true,
|
||||
stopCh: g.stopCh,
|
||||
wg: &g.workerWg,
|
||||
finished: false,
|
||||
resourceDataGatheringPeriod: options.ResourceDataGatheringPeriod,
|
||||
probeDuration: options.ProbeDuration,
|
||||
printVerboseLogs: options.PrintVerboseLogs,
|
||||
})
|
||||
} else {
|
||||
// Tracks kube-system pods if no valid PodList is passed in.
|
||||
var err error
|
||||
if pods == nil {
|
||||
pods, err = c.CoreV1().Pods("kube-system").List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Logf("Error while listing Pods: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
for _, pod := range pods.Items {
|
||||
for _, container := range pod.Status.InitContainerStatuses {
|
||||
g.containerIDs = append(g.containerIDs, container.Name)
|
||||
}
|
||||
for _, container := range pod.Status.ContainerStatuses {
|
||||
g.containerIDs = append(g.containerIDs, container.Name)
|
||||
}
|
||||
}
|
||||
nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Logf("Error while listing Nodes: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, node := range nodeList.Items {
|
||||
if !options.MasterOnly || system.IsMasterNode(node.Name) {
|
||||
g.workerWg.Add(1)
|
||||
g.workers = append(g.workers, resourceGatherWorker{
|
||||
c: c,
|
||||
nodeName: node.Name,
|
||||
wg: &g.workerWg,
|
||||
containerIDs: g.containerIDs,
|
||||
stopCh: g.stopCh,
|
||||
finished: false,
|
||||
inKubemark: false,
|
||||
resourceDataGatheringPeriod: options.ResourceDataGatheringPeriod,
|
||||
probeDuration: options.ProbeDuration,
|
||||
printVerboseLogs: options.PrintVerboseLogs,
|
||||
})
|
||||
if options.MasterOnly {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return &g, nil
|
||||
}
|
||||
|
||||
// StartGatheringData starts a stat gathering worker for each node to track,
|
||||
// and blocks until StopAndSummarize is called.
|
||||
func (g *containerResourceGatherer) StartGatheringData() {
|
||||
if len(g.workers) == 0 {
|
||||
return
|
||||
}
|
||||
delayPeriod := g.options.ResourceDataGatheringPeriod / time.Duration(len(g.workers))
|
||||
delay := time.Duration(0)
|
||||
for i := range g.workers {
|
||||
go g.workers[i].gather(delay)
|
||||
delay += delayPeriod
|
||||
}
|
||||
g.workerWg.Wait()
|
||||
}
|
||||
|
||||
// StopAndSummarize stops stat gathering workers, processes the collected stats,
|
||||
// generates resource summary for the passed-in percentiles, and returns the summary.
|
||||
// It returns an error if the resource usage at any percentile is beyond the
|
||||
// specified resource constraints.
|
||||
func (g *containerResourceGatherer) StopAndSummarize(percentiles []int, constraints map[string]ResourceConstraint) (*ResourceUsageSummary, error) {
|
||||
close(g.stopCh)
|
||||
Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
|
||||
finished := make(chan struct{})
|
||||
go func() {
|
||||
g.workerWg.Wait()
|
||||
finished <- struct{}{}
|
||||
}()
|
||||
select {
|
||||
case <-finished:
|
||||
Logf("Waitgroup finished.")
|
||||
case <-time.After(2 * time.Minute):
|
||||
unfinished := make([]string, 0)
|
||||
for i := range g.workers {
|
||||
if !g.workers[i].finished {
|
||||
unfinished = append(unfinished, g.workers[i].nodeName)
|
||||
}
|
||||
}
|
||||
Logf("Timed out while waiting for waitgroup, some workers failed to finish: %v", unfinished)
|
||||
}
|
||||
|
||||
if len(percentiles) == 0 {
|
||||
Logf("Warning! Empty percentile list for stopAndPrintData.")
|
||||
return &ResourceUsageSummary{}, fmt.Errorf("Failed to get any resource usage data")
|
||||
}
|
||||
data := make(map[int]ResourceUsagePerContainer)
|
||||
for i := range g.workers {
|
||||
if g.workers[i].finished {
|
||||
stats := computePercentiles(g.workers[i].dataSeries, percentiles)
|
||||
data = leftMergeData(stats, data)
|
||||
}
|
||||
}
|
||||
|
||||
// Workers have been stopped. We need to gather the data stored in them.
|
||||
sortedKeys := []string{}
|
||||
for name := range data[percentiles[0]] {
|
||||
sortedKeys = append(sortedKeys, name)
|
||||
}
|
||||
sort.Strings(sortedKeys)
|
||||
violatedConstraints := make([]string, 0)
|
||||
summary := make(ResourceUsageSummary)
|
||||
for _, perc := range percentiles {
|
||||
for _, name := range sortedKeys {
|
||||
usage := data[perc][name]
|
||||
summary[strconv.Itoa(perc)] = append(summary[strconv.Itoa(perc)], SingleContainerSummary{
|
||||
Name: name,
|
||||
Cpu: usage.CPUUsageInCores,
|
||||
Mem: usage.MemoryWorkingSetInBytes,
|
||||
})
|
||||
// Verifying 99th percentile of resource usage
|
||||
if perc == 99 {
|
||||
// Name has a form: <pod_name>/<container_name>
|
||||
containerName := strings.Split(name, "/")[1]
|
||||
if constraint, ok := constraints[containerName]; ok {
|
||||
if usage.CPUUsageInCores > constraint.CPUConstraint {
|
||||
violatedConstraints = append(
|
||||
violatedConstraints,
|
||||
fmt.Sprintf("Container %v is using %v/%v CPU",
|
||||
name,
|
||||
usage.CPUUsageInCores,
|
||||
constraint.CPUConstraint,
|
||||
),
|
||||
)
|
||||
}
|
||||
if usage.MemoryWorkingSetInBytes > constraint.MemoryConstraint {
|
||||
violatedConstraints = append(
|
||||
violatedConstraints,
|
||||
fmt.Sprintf("Container %v is using %v/%v MB of memory",
|
||||
name,
|
||||
float64(usage.MemoryWorkingSetInBytes)/(1024*1024),
|
||||
float64(constraint.MemoryConstraint)/(1024*1024),
|
||||
),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(violatedConstraints) > 0 {
|
||||
return &summary, fmt.Errorf(strings.Join(violatedConstraints, "\n"))
|
||||
}
|
||||
return &summary, nil
|
||||
}
|
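// Illustrative end-to-end use of the gatherer (a minimal sketch; the options,
// percentiles and constraint values below are assumptions chosen for the
// example, not defaults from this file):
//
//	g, err := NewResourceUsageGatherer(c, ResourceGathererOptions{
//		MasterOnly:                  true,
//		ResourceDataGatheringPeriod: 10 * time.Second,
//		ProbeDuration:               15 * time.Second,
//	}, nil)
//	if err != nil {
//		Failf("failed to create resource gatherer: %v", err)
//	}
//	go g.StartGatheringData()
//	// ... run the workload under test ...
//	summary, err := g.StopAndSummarize([]int{50, 90, 99}, map[string]ResourceConstraint{
//		// Keys are container names; constraints are checked at the 99th percentile.
//		"kube-apiserver": {CPUConstraint: 1.0, MemoryConstraint: 1024 * 1024 * 1024},
//	})
//	if err != nil {
//		Failf("resource constraints violated: %v", err)
//	}
//	Logf("%s", summary.PrintHumanReadable())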
110
vendor/k8s.io/kubernetes/test/e2e/framework/rs_util.go
generated
vendored
Normal file
110
vendor/k8s.io/kubernetes/test/e2e/framework/rs_util.go
generated
vendored
Normal file
@ -0,0 +1,110 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
type updateRsFunc func(d *extensions.ReplicaSet)
|
||||
|
||||
func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*extensions.ReplicaSet, error) {
|
||||
return testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)
|
||||
}
|
||||
|
||||
// CheckNewRSAnnotations checks if the new RS's annotations are as expected
|
||||
func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range expectedAnnotations {
|
||||
// Skip checking revision annotations
|
||||
if k != deploymentutil.RevisionAnnotation && v != newRS.Annotations[k] {
|
||||
return fmt.Errorf("Expected new RS annotations = %+v, got %+v", expectedAnnotations, newRS.Annotations)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WaitForReadyReplicaSet waits until the replicaset has all of its replicas ready.
|
||||
func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
|
||||
err := wait.Poll(Poll, pollShortTimeout, func() (bool, error) {
|
||||
rs, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return *(rs.Spec.Replicas) == rs.Status.Replicas && *(rs.Spec.Replicas) == rs.Status.ReadyReplicas, nil
|
||||
})
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("replicaset %q never became ready", name)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func RunReplicaSet(config testutils.ReplicaSetConfig) error {
|
||||
By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
|
||||
config.NodeDumpFunc = DumpNodeDebugInfo
|
||||
config.ContainerDumpFunc = LogFailedContainers
|
||||
return testutils.RunReplicaSet(config)
|
||||
}
|
||||
|
||||
func NewReplicaSet(name, namespace string, replicas int32, podLabels map[string]string, imageName, image string) *extensions.ReplicaSet {
|
||||
return &extensions.ReplicaSet{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ReplicaSet",
|
||||
APIVersion: "extensions/v1beta1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
},
|
||||
Spec: extensions.ReplicaSetSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: podLabels,
|
||||
},
|
||||
Replicas: &replicas,
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: podLabels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: imageName,
|
||||
Image: image,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
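// Illustrative use of the helpers above (a minimal sketch; the names, labels,
// image and replica count are assumptions for the example):
//
//	rs := NewReplicaSet("example-rs", ns, 2, map[string]string{"app": "example"},
//		"example-container", "k8s.gcr.io/pause:3.1")
//	if _, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(rs); err != nil {
//		Failf("failed to create ReplicaSet: %v", err)
//	}
//	if err := WaitForReadyReplicaSet(c, ns, rs.Name); err != nil {
//		Failf("ReplicaSet never became ready: %v", err)
//	}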
1468
vendor/k8s.io/kubernetes/test/e2e/framework/service_util.go
generated
vendored
Normal file
1468
vendor/k8s.io/kubernetes/test/e2e/framework/service_util.go
generated
vendored
Normal file
File diff suppressed because it is too large
154
vendor/k8s.io/kubernetes/test/e2e/framework/size.go
generated
vendored
Normal file
154
vendor/k8s.io/kubernetes/test/e2e/framework/size.go
generated
vendored
Normal file
@ -0,0 +1,154 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/autoscaling"
|
||||
awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
|
||||
)
|
||||
|
||||
const (
|
||||
resizeNodeReadyTimeout = 2 * time.Minute
|
||||
resizeNodeNotReadyTimeout = 2 * time.Minute
|
||||
)
|
||||
|
||||
func ResizeGroup(group string, size int32) error {
|
||||
if TestContext.ReportDir != "" {
|
||||
CoreDump(TestContext.ReportDir)
|
||||
defer CoreDump(TestContext.ReportDir)
|
||||
}
|
||||
if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
|
||||
// TODO: make this hit the compute API directly instead of shelling out to gcloud.
|
||||
// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
|
||||
zone, err := getGCEZoneForGroup(group)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "resize",
|
||||
group, fmt.Sprintf("--size=%v", size),
|
||||
"--project="+TestContext.CloudConfig.ProjectID, "--zone="+zone).CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to resize node instance group %s: %s", group, output)
|
||||
}
|
||||
return nil
|
||||
} else if TestContext.Provider == "aws" {
|
||||
client := autoscaling.New(session.New())
|
||||
return awscloud.ResizeInstanceGroup(client, group, int(size))
|
||||
} else if TestContext.Provider == "kubemark" {
|
||||
return TestContext.CloudConfig.KubemarkController.SetNodeGroupSize(group, int(size))
|
||||
} else {
|
||||
return fmt.Errorf("Provider does not support InstanceGroups")
|
||||
}
|
||||
}
|
||||
|
||||
func GetGroupNodes(group string) ([]string, error) {
|
||||
if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
|
||||
// TODO: make this hit the compute API directly instead of shelling out to gcloud.
|
||||
// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
|
||||
zone, err := getGCEZoneForGroup(group)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
output, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
|
||||
"list-instances", group, "--project="+TestContext.CloudConfig.ProjectID,
|
||||
"--zone="+zone).CombinedOutput()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to get nodes in instance group %s: %s", group, output)
|
||||
}
|
||||
re := regexp.MustCompile(".*RUNNING")
|
||||
lines := re.FindAllString(string(output), -1)
|
||||
for i, line := range lines {
|
||||
lines[i] = line[:strings.Index(line, " ")]
|
||||
}
|
||||
return lines, nil
|
||||
} else if TestContext.Provider == "kubemark" {
|
||||
return TestContext.CloudConfig.KubemarkController.GetNodeNamesForNodeGroup(group)
|
||||
} else {
|
||||
return nil, fmt.Errorf("provider does not support InstanceGroups")
|
||||
}
|
||||
}
|
||||
|
||||
func GroupSize(group string) (int, error) {
|
||||
if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
|
||||
// TODO: make this hit the compute API directly instead of shelling out to gcloud.
|
||||
// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
|
||||
zone, err := getGCEZoneForGroup(group)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
output, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
|
||||
"list-instances", group, "--project="+TestContext.CloudConfig.ProjectID,
|
||||
"--zone="+zone).CombinedOutput()
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("Failed to get group size for group %s: %s", group, output)
|
||||
}
|
||||
re := regexp.MustCompile("RUNNING")
|
||||
return len(re.FindAllString(string(output), -1)), nil
|
||||
} else if TestContext.Provider == "aws" {
|
||||
client := autoscaling.New(session.New())
|
||||
instanceGroup, err := awscloud.DescribeInstanceGroup(client, group)
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("error describing instance group: %v", err)
|
||||
}
|
||||
if instanceGroup == nil {
|
||||
return -1, fmt.Errorf("instance group not found: %s", group)
|
||||
}
|
||||
return instanceGroup.CurrentSize()
|
||||
} else if TestContext.Provider == "kubemark" {
|
||||
return TestContext.CloudConfig.KubemarkController.GetNodeGroupSize(group)
|
||||
} else {
|
||||
return -1, fmt.Errorf("provider does not support InstanceGroups")
|
||||
}
|
||||
}
|
||||
|
||||
func WaitForGroupSize(group string, size int32) error {
|
||||
timeout := 30 * time.Minute
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
|
||||
currentSize, err := GroupSize(group)
|
||||
if err != nil {
|
||||
Logf("Failed to get node instance group size: %v", err)
|
||||
continue
|
||||
}
|
||||
if currentSize != int(size) {
|
||||
Logf("Waiting for node instance group size %d, current size %d", size, currentSize)
|
||||
continue
|
||||
}
|
||||
Logf("Node instance group has reached the desired size %d", size)
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size)
|
||||
}
|
||||
|
||||
func getGCEZoneForGroup(group string) (string, error) {
|
||||
zone := TestContext.CloudConfig.Zone
|
||||
if TestContext.CloudConfig.MultiZone {
|
||||
output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "list",
|
||||
"--project="+TestContext.CloudConfig.ProjectID, "--format=value(zone)", "--filter=name="+group).CombinedOutput()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Failed to get zone for node group %s: %s", group, output)
|
||||
}
|
||||
zone = strings.TrimSpace(string(output))
|
||||
}
|
||||
return zone, nil
|
||||
}
|
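// Illustrative resize flow using the helpers above (a minimal sketch; the group
// name and target size are assumptions for the example):
//
//	group, target := "my-instance-group", int32(3)
//	if err := ResizeGroup(group, target); err != nil {
//		Failf("failed to resize %s: %v", group, err)
//	}
//	if err := WaitForGroupSize(group, target); err != nil {
//		Failf("group %s never reached size %d: %v", group, target, err)
//	}
//	nodes, err := GetGroupNodes(group)
//	if err != nil {
//		Failf("failed to list nodes in %s: %v", group, err)
//	}
//	Logf("group %s now has nodes: %v", group, nodes)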
894
vendor/k8s.io/kubernetes/test/e2e/framework/statefulset_utils.go
generated
vendored
Normal file
894
vendor/k8s.io/kubernetes/test/e2e/framework/statefulset_utils.go
generated
vendored
Normal file
@ -0,0 +1,894 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
apps "k8s.io/api/apps/v1beta1"
|
||||
appsV1beta2 "k8s.io/api/apps/v1beta2"
|
||||
"k8s.io/api/core/v1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/test/e2e/manifest"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
const (
|
||||
// Poll interval for StatefulSet tests
|
||||
StatefulSetPoll = 10 * time.Second
|
||||
// Timeout interval for StatefulSet operations
|
||||
StatefulSetTimeout = 10 * time.Minute
|
||||
// Timeout for stateful pods to change state
|
||||
StatefulPodTimeout = 5 * time.Minute
|
||||
)
|
||||
|
||||
// CreateStatefulSetService creates a Headless Service with Name name and Selector set to match labels.
|
||||
func CreateStatefulSetService(name string, labels map[string]string) *v1.Service {
|
||||
headlessService := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: labels,
|
||||
},
|
||||
}
|
||||
headlessService.Spec.Ports = []v1.ServicePort{
|
||||
{Port: 80, Name: "http", Protocol: "TCP"},
|
||||
}
|
||||
headlessService.Spec.ClusterIP = "None"
|
||||
return headlessService
|
||||
}
|
||||
|
||||
// StatefulSetTester is a struct that contains utility methods for testing StatefulSet related functionality. It uses a
|
||||
// clientset.Interface to communicate with the API server.
|
||||
type StatefulSetTester struct {
|
||||
c clientset.Interface
|
||||
}
|
||||
|
||||
// NewStatefulSetTester creates a StatefulSetTester that uses c to interact with the API server.
|
||||
func NewStatefulSetTester(c clientset.Interface) *StatefulSetTester {
|
||||
return &StatefulSetTester{c}
|
||||
}
|
||||
|
||||
// GetStatefulSet gets the StatefulSet named name in namespace.
|
||||
func (s *StatefulSetTester) GetStatefulSet(namespace, name string) *apps.StatefulSet {
|
||||
ss, err := s.c.AppsV1beta1().StatefulSets(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
Failf("Failed to get StatefulSet %s/%s: %v", namespace, name, err)
|
||||
}
|
||||
return ss
|
||||
}
|
||||
|
||||
// CreateStatefulSet creates a StatefulSet (and its governing Service) from the manifests at manifestPath in the Namespace ns using the API client.
|
||||
func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *apps.StatefulSet {
|
||||
mkpath := func(file string) string {
|
||||
return filepath.Join(manifestPath, file)
|
||||
}
|
||||
|
||||
Logf("Parsing statefulset from %v", mkpath("statefulset.yaml"))
|
||||
ss, err := manifest.StatefulSetFromManifest(mkpath("statefulset.yaml"), ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Logf("Parsing service from %v", mkpath("service.yaml"))
|
||||
svc, err := manifest.SvcFromManifest(mkpath("service.yaml"))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Logf(fmt.Sprintf("creating " + ss.Name + " service"))
|
||||
_, err = s.c.CoreV1().Services(ns).Create(svc)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector))
|
||||
_, err = s.c.AppsV1beta1().StatefulSets(ns).Create(ss)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
s.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
|
||||
return ss
|
||||
}
|
||||
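// Illustrative use of the tester (a minimal sketch; the manifest directory and
// scale target are assumptions for the example):
//
//	sst := NewStatefulSetTester(c)
//	ss := sst.CreateStatefulSet("test/e2e/testing-manifests/statefulset/nginx", ns)
//	ss, err := sst.Scale(ss, 3)
//	ExpectNoError(err)
//	sst.WaitForRunningAndReady(3, ss)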
|
||||
// CheckMount checks that the mount at mountPath is valid for all Pods in ss.
|
||||
func (s *StatefulSetTester) CheckMount(ss *apps.StatefulSet, mountPath string) error {
|
||||
for _, cmd := range []string{
|
||||
// Print inode, size etc
|
||||
fmt.Sprintf("ls -idlh %v", mountPath),
|
||||
// Print subdirs
|
||||
fmt.Sprintf("find %v", mountPath),
|
||||
// Try writing
|
||||
fmt.Sprintf("touch %v", filepath.Join(mountPath, fmt.Sprintf("%v", time.Now().UnixNano()))),
|
||||
} {
|
||||
if err := s.ExecInStatefulPods(ss, cmd); err != nil {
|
||||
return fmt.Errorf("failed to execute %v, error: %v", cmd, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExecInStatefulPods executes cmd in all Pods in ss. If an error occurs it is returned and cmd is not executed in any subsequent Pods.
|
||||
func (s *StatefulSetTester) ExecInStatefulPods(ss *apps.StatefulSet, cmd string) error {
|
||||
podList := s.GetPodList(ss)
|
||||
for _, statefulPod := range podList.Items {
|
||||
stdout, err := RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
|
||||
Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckHostname verifies that all Pods in ss have the correct Hostname. If the returned error is not nil then verification failed.
|
||||
func (s *StatefulSetTester) CheckHostname(ss *apps.StatefulSet) error {
|
||||
cmd := "printf $(hostname)"
|
||||
podList := s.GetPodList(ss)
|
||||
for _, statefulPod := range podList.Items {
|
||||
hostname, err := RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if hostname != statefulPod.Name {
|
||||
return fmt.Errorf("unexpected hostname (%s) and stateful pod name (%s) not equal", hostname, statefulPod.Name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Saturate waits for all Pods in ss to become Running and Ready.
|
||||
func (s *StatefulSetTester) Saturate(ss *apps.StatefulSet) {
|
||||
var i int32
|
||||
for i = 0; i < *(ss.Spec.Replicas); i++ {
|
||||
Logf("Waiting for stateful pod at index %v to enter Running", i)
|
||||
s.WaitForRunning(i+1, i, ss)
|
||||
Logf("Resuming stateful pod at index %v", i)
|
||||
s.ResumeNextPod(ss)
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteStatefulPodAtIndex deletes the Pod with ordinal index in ss.
|
||||
func (s *StatefulSetTester) DeleteStatefulPodAtIndex(index int, ss *apps.StatefulSet) {
|
||||
name := getStatefulSetPodNameAtIndex(index, ss)
|
||||
noGrace := int64(0)
|
||||
if err := s.c.CoreV1().Pods(ss.Namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil {
|
||||
Failf("Failed to delete stateful pod %v for StatefulSet %v/%v: %v", name, ss.Namespace, ss.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// VerifyStatefulPodFunc is a func that examines a StatefulSetPod.
|
||||
type VerifyStatefulPodFunc func(*v1.Pod)
|
||||
|
||||
// VerifyPodAtIndex applies a visitor pattern to the Pod at index in ss. verify is applied to the Pod to "visit" it.
|
||||
func (s *StatefulSetTester) VerifyPodAtIndex(index int, ss *apps.StatefulSet, verify VerifyStatefulPodFunc) {
|
||||
name := getStatefulSetPodNameAtIndex(index, ss)
|
||||
pod, err := s.c.CoreV1().Pods(ss.Namespace).Get(name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get stateful pod %s for StatefulSet %s/%s", name, ss.Namespace, ss.Name))
|
||||
verify(pod)
|
||||
}
|
||||
|
||||
func getStatefulSetPodNameAtIndex(index int, ss *apps.StatefulSet) string {
|
||||
// TODO: we won't use "-index" as the name strategy forever,
|
||||
// pull the name out from an identity mapper.
|
||||
return fmt.Sprintf("%v-%v", ss.Name, index)
|
||||
}
|
||||
|
||||
// Scale scales ss to count replicas.
|
||||
func (s *StatefulSetTester) Scale(ss *apps.StatefulSet, count int32) (*apps.StatefulSet, error) {
|
||||
name := ss.Name
|
||||
ns := ss.Namespace
|
||||
|
||||
Logf("Scaling statefulset %s to %d", name, count)
|
||||
ss = s.update(ns, name, func(ss *apps.StatefulSet) { *(ss.Spec.Replicas) = count })
|
||||
|
||||
var statefulPodList *v1.PodList
|
||||
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
|
||||
statefulPodList = s.GetPodList(ss)
|
||||
if int32(len(statefulPodList.Items)) == count {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if pollErr != nil {
|
||||
unhealthy := []string{}
|
||||
for _, statefulPod := range statefulPodList.Items {
|
||||
delTs, phase, readiness := statefulPod.DeletionTimestamp, statefulPod.Status.Phase, podutil.IsPodReady(&statefulPod)
|
||||
if delTs != nil || phase != v1.PodRunning || !readiness {
|
||||
unhealthy = append(unhealthy, fmt.Sprintf("%v: deletion %v, phase %v, readiness %v", statefulPod.Name, delTs, phase, readiness))
|
||||
}
|
||||
}
|
||||
return ss, fmt.Errorf("Failed to scale statefulset to %d in %v. Remaining pods:\n%v", count, StatefulSetTimeout, unhealthy)
|
||||
}
|
||||
return ss, nil
|
||||
}
|
||||
|
||||
// UpdateReplicas updates the replicas of ss to count.
|
||||
func (s *StatefulSetTester) UpdateReplicas(ss *apps.StatefulSet, count int32) {
|
||||
s.update(ss.Namespace, ss.Name, func(ss *apps.StatefulSet) { *(ss.Spec.Replicas) = count })
|
||||
}
|
||||
|
||||
// Restart scales ss to 0 and then back to its previous number of replicas.
|
||||
func (s *StatefulSetTester) Restart(ss *apps.StatefulSet) {
|
||||
oldReplicas := *(ss.Spec.Replicas)
|
||||
ss, err := s.Scale(ss, 0)
|
||||
ExpectNoError(err)
|
||||
// Wait for controller to report the desired number of Pods.
|
||||
// This way we know the controller has observed all Pod deletions
|
||||
// before we scale it back up.
|
||||
s.WaitForStatusReplicas(ss, 0)
|
||||
s.update(ss.Namespace, ss.Name, func(ss *apps.StatefulSet) { *(ss.Spec.Replicas) = oldReplicas })
|
||||
}
|
||||
|
||||
func (s *StatefulSetTester) update(ns, name string, update func(ss *apps.StatefulSet)) *apps.StatefulSet {
|
||||
for i := 0; i < 3; i++ {
|
||||
ss, err := s.c.AppsV1beta1().StatefulSets(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
Failf("failed to get statefulset %q: %v", name, err)
|
||||
}
|
||||
update(ss)
|
||||
ss, err = s.c.AppsV1beta1().StatefulSets(ns).Update(ss)
|
||||
if err == nil {
|
||||
return ss
|
||||
}
|
||||
if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
|
||||
Failf("failed to update statefulset %q: %v", name, err)
|
||||
}
|
||||
}
|
||||
Failf("too many retries draining statefulset %q", name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetPodList gets the current Pods in ss.
|
||||
func (s *StatefulSetTester) GetPodList(ss *apps.StatefulSet) *v1.PodList {
|
||||
selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
|
||||
ExpectNoError(err)
|
||||
podList, err := s.c.CoreV1().Pods(ss.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
|
||||
ExpectNoError(err)
|
||||
return podList
|
||||
}
|
||||
|
||||
// ConfirmStatefulPodCount asserts that the current number of Pods in ss is count, waiting up to timeout for ss
|
||||
// to scale to count.
|
||||
func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *apps.StatefulSet, timeout time.Duration, hard bool) {
|
||||
start := time.Now()
|
||||
deadline := start.Add(timeout)
|
||||
for t := time.Now(); t.Before(deadline); t = time.Now() {
|
||||
podList := s.GetPodList(ss)
|
||||
statefulPodCount := len(podList.Items)
|
||||
if statefulPodCount != count {
|
||||
logPodStates(podList.Items)
|
||||
if hard {
|
||||
Failf("StatefulSet %v scaled unexpectedly scaled to %d -> %d replicas", ss.Name, count, len(podList.Items))
|
||||
} else {
|
||||
Logf("StatefulSet %v has not reached scale %d, at %d", ss.Name, count, statefulPodCount)
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
continue
|
||||
}
|
||||
Logf("Verifying statefulset %v doesn't scale past %d for another %+v", ss.Name, count, deadline.Sub(t))
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
// WaitForRunning waits for numPodsRunning in ss to be Running and for the first
|
||||
// numPodsReady ordinals to be Ready.
|
||||
func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, ss *apps.StatefulSet) {
|
||||
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
|
||||
func() (bool, error) {
|
||||
podList := s.GetPodList(ss)
|
||||
s.SortStatefulPods(podList)
|
||||
if int32(len(podList.Items)) < numPodsRunning {
|
||||
Logf("Found %d stateful pods, waiting for %d", len(podList.Items), numPodsRunning)
|
||||
return false, nil
|
||||
}
|
||||
if int32(len(podList.Items)) > numPodsRunning {
|
||||
return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPodsRunning, len(podList.Items))
|
||||
}
|
||||
for _, p := range podList.Items {
|
||||
shouldBeReady := getStatefulPodOrdinal(&p) < int(numPodsReady)
|
||||
isReady := podutil.IsPodReady(&p)
|
||||
desiredReadiness := shouldBeReady == isReady
|
||||
Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, v1.PodRunning, shouldBeReady, p.Status.Phase, isReady)
|
||||
if p.Status.Phase != v1.PodRunning || !desiredReadiness {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if pollErr != nil {
|
||||
Failf("Failed waiting for pods to enter running: %v", pollErr)
|
||||
}
|
||||
}
|
||||
|
||||
// WaitForState periodically polls for the ss and its pods until the until function returns either true or an error
|
||||
func (s *StatefulSetTester) WaitForState(ss *apps.StatefulSet, until func(*apps.StatefulSet, *v1.PodList) (bool, error)) {
|
||||
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
|
||||
func() (bool, error) {
|
||||
ssGet, err := s.c.AppsV1beta1().StatefulSets(ss.Namespace).Get(ss.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
podList := s.GetPodList(ssGet)
|
||||
return until(ssGet, podList)
|
||||
})
|
||||
if pollErr != nil {
|
||||
Failf("Failed waiting for state update: %v", pollErr)
|
||||
}
|
||||
}
|
||||
|
||||
// WaitForStatus waits for the StatefulSetStatus's ObservedGeneration to be greater than or equal to set's Generation.
|
||||
// The returned StatefulSet contains such a StatefulSetStatus
|
||||
func (s *StatefulSetTester) WaitForStatus(set *apps.StatefulSet) *apps.StatefulSet {
|
||||
s.WaitForState(set, func(set2 *apps.StatefulSet, pods *v1.PodList) (bool, error) {
|
||||
if set2.Status.ObservedGeneration != nil && *set2.Status.ObservedGeneration >= set.Generation {
|
||||
set = set2
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
return set
|
||||
}
|
||||
|
||||
// WaitForRunningAndReady waits for numStatefulPods in ss to be Running and Ready.
|
||||
func (s *StatefulSetTester) WaitForRunningAndReady(numStatefulPods int32, ss *apps.StatefulSet) {
|
||||
s.WaitForRunning(numStatefulPods, numStatefulPods, ss)
|
||||
}
|
||||
|
||||
// WaitForPodReady waits for the Pod named podName in set to exist and have a Ready condition.
|
||||
func (s *StatefulSetTester) WaitForPodReady(set *apps.StatefulSet, podName string) (*apps.StatefulSet, *v1.PodList) {
|
||||
var pods *v1.PodList
|
||||
s.WaitForState(set, func(set2 *apps.StatefulSet, pods2 *v1.PodList) (bool, error) {
|
||||
set = set2
|
||||
pods = pods2
|
||||
for i := range pods.Items {
|
||||
if pods.Items[i].Name == podName {
|
||||
return podutil.IsPodReady(&pods.Items[i]), nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
return set, pods
|
||||
|
||||
}
|
||||
|
||||
// WaitForPodNotReady waits for the Pod named podName in set to exist and to not have a Ready condition.
|
||||
func (s *StatefulSetTester) WaitForPodNotReady(set *apps.StatefulSet, podName string) (*apps.StatefulSet, *v1.PodList) {
|
||||
var pods *v1.PodList
|
||||
s.WaitForState(set, func(set2 *apps.StatefulSet, pods2 *v1.PodList) (bool, error) {
|
||||
set = set2
|
||||
pods = pods2
|
||||
for i := range pods.Items {
|
||||
if pods.Items[i].Name == podName {
|
||||
return !podutil.IsPodReady(&pods.Items[i]), nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
return set, pods
|
||||
|
||||
}
|
||||
|
||||
// WaitForRollingUpdate waits for all Pods in set to exist and have the correct revision and for the RollingUpdate to
|
||||
// complete. set must have a RollingUpdateStatefulSetStrategyType.
|
||||
func (s *StatefulSetTester) WaitForRollingUpdate(set *apps.StatefulSet) (*apps.StatefulSet, *v1.PodList) {
|
||||
var pods *v1.PodList
|
||||
if set.Spec.UpdateStrategy.Type != apps.RollingUpdateStatefulSetStrategyType {
|
||||
Failf("StatefulSet %s/%s attempt to wait for rolling update with updateStrategy %s",
|
||||
set.Namespace,
|
||||
set.Name,
|
||||
set.Spec.UpdateStrategy.Type)
|
||||
}
|
||||
s.WaitForState(set, func(set2 *apps.StatefulSet, pods2 *v1.PodList) (bool, error) {
|
||||
set = set2
|
||||
pods = pods2
|
||||
if len(pods.Items) < int(*set.Spec.Replicas) {
|
||||
return false, nil
|
||||
}
|
||||
if set.Status.UpdateRevision != set.Status.CurrentRevision {
|
||||
Logf("Waiting for StatefulSet %s/%s to complete update",
|
||||
set.Namespace,
|
||||
set.Name,
|
||||
)
|
||||
s.SortStatefulPods(pods)
|
||||
for i := range pods.Items {
|
||||
if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
|
||||
Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
set.Status.UpdateRevision,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel])
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
return set, pods
|
||||
}
|
||||
|
||||
// WaitForPartitionedRollingUpdate waits for all Pods in set to exist and have the correct revision. set must have
|
||||
// a RollingUpdateStatefulSetStrategyType with a non-nil RollingUpdate and Partition. All Pods with ordinals less
|
||||
// than or equal to the Partition are expected to be at set's current revision. All other Pods are expected to be
|
||||
// at its update revision.
|
||||
func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *apps.StatefulSet) (*apps.StatefulSet, *v1.PodList) {
|
||||
var pods *v1.PodList
|
||||
if set.Spec.UpdateStrategy.Type != apps.RollingUpdateStatefulSetStrategyType {
|
||||
Failf("StatefulSet %s/%s attempt to wait for partitioned update with updateStrategy %s",
|
||||
set.Namespace,
|
||||
set.Name,
|
||||
set.Spec.UpdateStrategy.Type)
|
||||
}
|
||||
if set.Spec.UpdateStrategy.RollingUpdate == nil || set.Spec.UpdateStrategy.RollingUpdate.Partition == nil {
|
||||
Failf("StatefulSet %s/%s attempt to wait for partitioned update with nil RollingUpdate or nil Partition",
|
||||
set.Namespace,
|
||||
set.Name)
|
||||
}
|
||||
s.WaitForState(set, func(set2 *apps.StatefulSet, pods2 *v1.PodList) (bool, error) {
|
||||
set = set2
|
||||
pods = pods2
|
||||
partition := int(*set.Spec.UpdateStrategy.RollingUpdate.Partition)
|
||||
if len(pods.Items) < int(*set.Spec.Replicas) {
|
||||
return false, nil
|
||||
}
|
||||
if partition <= 0 && set.Status.UpdateRevision != set.Status.CurrentRevision {
|
||||
Logf("Waiting for StatefulSet %s/%s to complete update",
|
||||
set.Namespace,
|
||||
set.Name,
|
||||
)
|
||||
s.SortStatefulPods(pods)
|
||||
for i := range pods.Items {
|
||||
if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
|
||||
Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
set.Status.UpdateRevision,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel])
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
} else {
|
||||
for i := int(*set.Spec.Replicas) - 1; i >= partition; i-- {
|
||||
if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
|
||||
Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
set.Status.UpdateRevision,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel])
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
return set, pods
|
||||
}
|
||||
|
||||
// WaitForRunningAndNotReady waits for numStatefulPods in ss to be Running and not Ready.
|
||||
func (s *StatefulSetTester) WaitForRunningAndNotReady(numStatefulPods int32, ss *apps.StatefulSet) {
|
||||
s.WaitForRunning(numStatefulPods, 0, ss)
|
||||
}
|
||||
|
||||
var httpProbe = &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
Path: "/index.html",
|
||||
Port: intstr.IntOrString{IntVal: 80},
|
||||
},
|
||||
},
|
||||
PeriodSeconds: 1,
|
||||
SuccessThreshold: 1,
|
||||
FailureThreshold: 1,
|
||||
}
|
||||
|
||||
// SetHttpProbe sets the pod template's ReadinessProbe for Nginx StatefulSet containers.
|
||||
// This probe can then be controlled with BreakHttpProbe() and RestoreHttpProbe().
|
||||
// Note that this cannot be used together with PauseNewPods().
|
||||
func (s *StatefulSetTester) SetHttpProbe(ss *apps.StatefulSet) {
|
||||
ss.Spec.Template.Spec.Containers[0].ReadinessProbe = httpProbe
|
||||
}
|
||||
|
||||
// BreakHttpProbe breaks the readiness probe for Nginx StatefulSet containers in ss.
|
||||
func (s *StatefulSetTester) BreakHttpProbe(ss *apps.StatefulSet) error {
|
||||
path := httpProbe.HTTPGet.Path
|
||||
if path == "" {
|
||||
return fmt.Errorf("Path expected to be not empty: %v", path)
|
||||
}
|
||||
// Ignore 'mv' errors to make this idempotent.
|
||||
cmd := fmt.Sprintf("mv -v /usr/share/nginx/html%v /tmp/ || true", path)
|
||||
return s.ExecInStatefulPods(ss, cmd)
|
||||
}
|
||||
|
||||
// BreakPodHttpProbe breaks the readiness probe for Nginx StatefulSet containers in one pod.
|
||||
func (s *StatefulSetTester) BreakPodHttpProbe(ss *apps.StatefulSet, pod *v1.Pod) error {
|
||||
path := httpProbe.HTTPGet.Path
|
||||
if path == "" {
|
||||
return fmt.Errorf("Path expected to be not empty: %v", path)
|
||||
}
|
||||
// Ignore 'mv' errors to make this idempotent.
|
||||
cmd := fmt.Sprintf("mv -v /usr/share/nginx/html%v /tmp/ || true", path)
|
||||
stdout, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
|
||||
Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
|
||||
return err
|
||||
}
|
||||
|
||||
// RestoreHttpProbe restores the readiness probe for Nginx StatefulSet containers in ss.
|
||||
func (s *StatefulSetTester) RestoreHttpProbe(ss *apps.StatefulSet) error {
|
||||
path := httpProbe.HTTPGet.Path
|
||||
if path == "" {
|
||||
return fmt.Errorf("Path expected to be not empty: %v", path)
|
||||
}
|
||||
// Ignore 'mv' errors to make this idempotent.
|
||||
cmd := fmt.Sprintf("mv -v /tmp%v /usr/share/nginx/html/ || true", path)
|
||||
return s.ExecInStatefulPods(ss, cmd)
|
||||
}
|
||||
|
||||
// RestorePodHttpProbe restores the readiness probe for Nginx StatefulSet containers in pod.
|
||||
func (s *StatefulSetTester) RestorePodHttpProbe(ss *apps.StatefulSet, pod *v1.Pod) error {
|
||||
path := httpProbe.HTTPGet.Path
|
||||
if path == "" {
|
||||
return fmt.Errorf("Path expected to be not empty: %v", path)
|
||||
}
|
||||
// Ignore 'mv' errors to make this idempotent.
|
||||
cmd := fmt.Sprintf("mv -v /tmp%v /usr/share/nginx/html/ || true", path)
|
||||
stdout, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
|
||||
Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
|
||||
return err
|
||||
}
|
||||
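// Illustrative readiness-probe workflow with the helpers above (a minimal
// sketch; creation of ss and the surrounding waits are elided). SetHttpProbe
// must be applied to the StatefulSet spec before it is created:
//
//	sst.SetHttpProbe(ss)
//	// ... create ss and wait for it to become Running and Ready ...
//	ExpectNoError(sst.BreakHttpProbe(ss))
//	sst.WaitForRunningAndNotReady(*ss.Spec.Replicas, ss)
//	ExpectNoError(sst.RestoreHttpProbe(ss))
//	sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)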
|
||||
var pauseProbe = &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
Exec: &v1.ExecAction{Command: []string{"test", "-f", "/data/statefulset-continue"}},
|
||||
},
|
||||
PeriodSeconds: 1,
|
||||
SuccessThreshold: 1,
|
||||
FailureThreshold: 1,
|
||||
}
|
||||
|
||||
func hasPauseProbe(pod *v1.Pod) bool {
|
||||
probe := pod.Spec.Containers[0].ReadinessProbe
|
||||
return probe != nil && reflect.DeepEqual(probe.Exec.Command, pauseProbe.Exec.Command)
|
||||
}
|
||||
|
||||
// PauseNewPods adds an always-failing ReadinessProbe to the StatefulSet PodTemplate.
|
||||
// This causes all newly-created Pods to stay Unready until they are manually resumed
|
||||
// with ResumeNextPod().
|
||||
// Note that this cannot be used together with SetHttpProbe().
|
||||
func (s *StatefulSetTester) PauseNewPods(ss *apps.StatefulSet) {
|
||||
ss.Spec.Template.Spec.Containers[0].ReadinessProbe = pauseProbe
|
||||
}
|
||||
|
||||
// ResumeNextPod allows the next Pod in the StatefulSet to continue by removing the ReadinessProbe
|
||||
// added by PauseNewPods(), if it's still there.
|
||||
// It fails the test if it finds any pods that are not in phase Running,
|
||||
// or if it finds more than one paused Pod existing at the same time.
|
||||
// This is a no-op if there are no paused pods.
|
||||
func (s *StatefulSetTester) ResumeNextPod(ss *apps.StatefulSet) {
|
||||
podList := s.GetPodList(ss)
|
||||
resumedPod := ""
|
||||
for _, pod := range podList.Items {
|
||||
if pod.Status.Phase != v1.PodRunning {
|
||||
Failf("Found pod in phase %q, cannot resume", pod.Status.Phase)
|
||||
}
|
||||
if podutil.IsPodReady(&pod) || !hasPauseProbe(&pod) {
|
||||
continue
|
||||
}
|
||||
if resumedPod != "" {
|
||||
Failf("Found multiple paused stateful pods: %v and %v", pod.Name, resumedPod)
|
||||
}
|
||||
_, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, "touch /data/statefulset-continue; sync", StatefulSetPoll, StatefulPodTimeout)
|
||||
ExpectNoError(err)
|
||||
Logf("Resumed pod %v", pod.Name)
|
||||
resumedPod = pod.Name
|
||||
}
|
||||
}
|
||||
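// Illustrative use of PauseNewPods together with Saturate (a minimal sketch;
// creation of the StatefulSet object itself is elided):
//
//	sst := NewStatefulSetTester(c)
//	sst.PauseNewPods(ss) // must be applied before the StatefulSet is created
//	// ... create ss ...
//	sst.Saturate(ss) // brings pods up one by one, resuming each paused pod in turn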
|
||||
// WaitForStatusReadyReplicas waits for the ss.Status.ReadyReplicas to be equal to expectedReplicas
|
||||
func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *apps.StatefulSet, expectedReplicas int32) {
|
||||
Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
|
||||
|
||||
ns, name := ss.Namespace, ss.Name
|
||||
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
|
||||
func() (bool, error) {
|
||||
ssGet, err := s.c.AppsV1beta1().StatefulSets(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if *ssGet.Status.ObservedGeneration < ss.Generation {
|
||||
return false, nil
|
||||
}
|
||||
if ssGet.Status.ReadyReplicas != expectedReplicas {
|
||||
Logf("Waiting for stateful set status.readyReplicas to become %d, currently %d", expectedReplicas, ssGet.Status.ReadyReplicas)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if pollErr != nil {
|
||||
Failf("Failed waiting for stateful set status.readyReplicas updated to %d: %v", expectedReplicas, pollErr)
|
||||
}
|
||||
}
|
||||
|
||||
// WaitForStatusReplicas waits for the ss.Status.Replicas to be equal to expectedReplicas
|
||||
func (s *StatefulSetTester) WaitForStatusReplicas(ss *apps.StatefulSet, expectedReplicas int32) {
|
||||
Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
|
||||
|
||||
ns, name := ss.Namespace, ss.Name
|
||||
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
|
||||
func() (bool, error) {
|
||||
ssGet, err := s.c.AppsV1beta1().StatefulSets(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if *ssGet.Status.ObservedGeneration < ss.Generation {
|
||||
return false, nil
|
||||
}
|
||||
if ssGet.Status.Replicas != expectedReplicas {
|
||||
Logf("Waiting for stateful set status.replicas to become %d, currently %d", expectedReplicas, ssGet.Status.Replicas)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if pollErr != nil {
|
||||
Failf("Failed waiting for stateful set status.replicas updated to %d: %v", expectedReplicas, pollErr)
|
||||
}
|
||||
}
|
||||
|
||||
// CheckServiceName asserts that the ServiceName for ss is equivalent to expectedServiceName.
|
||||
func (p *StatefulSetTester) CheckServiceName(ss *apps.StatefulSet, expectedServiceName string) error {
|
||||
Logf("Checking if statefulset spec.serviceName is %s", expectedServiceName)
|
||||
|
||||
if expectedServiceName != ss.Spec.ServiceName {
|
||||
return fmt.Errorf("Wrong service name governing statefulset. Expected %s got %s",
|
||||
expectedServiceName, ss.Spec.ServiceName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SortStatefulPods sorts pods by their ordinals
|
||||
func (s *StatefulSetTester) SortStatefulPods(pods *v1.PodList) {
|
||||
sort.Sort(statefulPodsByOrdinal(pods.Items))
|
||||
}
|
||||
|
||||
// DeleteAllStatefulSets deletes all StatefulSet API Objects in Namespace ns.
|
||||
func DeleteAllStatefulSets(c clientset.Interface, ns string) {
|
||||
sst := &StatefulSetTester{c: c}
|
||||
ssList, err := c.AppsV1beta1().StatefulSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
ExpectNoError(err)
|
||||
|
||||
// Scale down each statefulset, then delete it completely.
|
||||
// Deleting a pvc without doing this will leak volumes, #25101.
|
||||
errList := []string{}
|
||||
for i := range ssList.Items {
|
||||
ss := &ssList.Items[i]
|
||||
var err error
|
||||
if ss, err = sst.Scale(ss, 0); err != nil {
|
||||
errList = append(errList, fmt.Sprintf("%v", err))
|
||||
}
|
||||
sst.WaitForStatusReplicas(ss, 0)
|
||||
Logf("Deleting statefulset %v", ss.Name)
|
||||
// Use OrphanDependents=false so it's deleted synchronously.
|
||||
// We already made sure the Pods are gone inside Scale().
|
||||
if err := c.AppsV1beta1().StatefulSets(ss.Namespace).Delete(ss.Name, &metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil {
|
||||
errList = append(errList, fmt.Sprintf("%v", err))
|
||||
}
|
||||
}
|
||||
|
||||
// pvs are global, so we need to wait for the exact ones bound to the statefulset pvcs.
|
||||
pvNames := sets.NewString()
|
||||
// TODO: Don't assume all pvcs in the ns belong to a statefulset
|
||||
pvcPollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
|
||||
pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
if err != nil {
|
||||
Logf("WARNING: Failed to list pvcs, retrying %v", err)
|
||||
return false, nil
|
||||
}
|
||||
for _, pvc := range pvcList.Items {
|
||||
pvNames.Insert(pvc.Spec.VolumeName)
|
||||
// TODO: Double check that there are no pods referencing the pvc
|
||||
Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
|
||||
if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvc.Name, nil); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if pvcPollErr != nil {
|
||||
errList = append(errList, fmt.Sprintf("Timeout waiting for pvc deletion."))
|
||||
}
|
||||
|
||||
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
|
||||
pvList, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
if err != nil {
|
||||
Logf("WARNING: Failed to list pvs, retrying %v", err)
|
||||
return false, nil
|
||||
}
|
||||
waitingFor := []string{}
|
||||
for _, pv := range pvList.Items {
|
||||
if pvNames.Has(pv.Name) {
|
||||
waitingFor = append(waitingFor, fmt.Sprintf("%v: %+v", pv.Name, pv.Status))
|
||||
}
|
||||
}
|
||||
if len(waitingFor) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n"))
|
||||
return false, nil
|
||||
})
|
||||
if pollErr != nil {
|
||||
errList = append(errList, fmt.Sprintf("Timeout waiting for pv provisioner to delete pvs, this might mean the test leaked pvs."))
|
||||
}
|
||||
if len(errList) != 0 {
|
||||
ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n")))
|
||||
}
|
||||
}
|
||||
|
||||
// NewStatefulSetPVC returns a PersistentVolumeClaim named name, for testing StatefulSets.
|
||||
func NewStatefulSetPVC(name string) v1.PersistentVolumeClaim {
|
||||
return v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewStatefulSet creates a new NGINX StatefulSet for testing. The StatefulSet is named name, is in namespace ns,
|
||||
// statefulPodMounts are the mounts that will be backed by PVs. podMounts are the mounts that are mounted directly
|
||||
// to the Pod. labels are the labels that will be used for the StatefulSet selector.
|
||||
func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulPodMounts []v1.VolumeMount, podMounts []v1.VolumeMount, labels map[string]string) *apps.StatefulSet {
|
||||
mounts := append(statefulPodMounts, podMounts...)
|
||||
claims := []v1.PersistentVolumeClaim{}
|
||||
for _, m := range statefulPodMounts {
|
||||
claims = append(claims, NewStatefulSetPVC(m.Name))
|
||||
}
|
||||
|
||||
vols := []v1.Volume{}
|
||||
for _, m := range podMounts {
|
||||
vols = append(vols, v1.Volume{
|
||||
Name: m.Name,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{
|
||||
Path: fmt.Sprintf("/tmp/%v", m.Name),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return &apps.StatefulSet{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "StatefulSet",
|
||||
APIVersion: "apps/v1beta1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: apps.StatefulSetSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: labels,
|
||||
},
|
||||
Replicas: func(i int32) *int32 { return &i }(replicas),
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: labels,
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
VolumeMounts: mounts,
|
||||
},
|
||||
},
|
||||
Volumes: vols,
|
||||
},
|
||||
},
|
||||
UpdateStrategy: apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType},
|
||||
VolumeClaimTemplates: claims,
|
||||
ServiceName: governingSvcName,
|
||||
},
|
||||
}
|
||||
}
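// Illustrative usage sketch (not part of the vendored file; the names "web" and ns
// are hypothetical): a test can build the spec with NewStatefulSet and submit it
// with the same apps/v1beta1 client the rest of this file already uses.
//
//	labels := map[string]string{"app": "nginx"}
//	ss := NewStatefulSet("web", ns, "web", 3, nil, nil, labels)
//	ss, err := c.AppsV1beta1().StatefulSets(ns).Create(ss)
//	ExpectNoError(err)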
|
||||
|
||||
// NewStatefulSetScale creates a new StatefulSet scale subresource and returns it
|
||||
func NewStatefulSetScale(ss *apps.StatefulSet) *appsV1beta2.Scale {
|
||||
return &appsV1beta2.Scale{
|
||||
// TODO: Create a variant of ObjectMeta type that only contains the fields below.
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: ss.Name,
|
||||
Namespace: ss.Namespace,
|
||||
},
|
||||
Spec: appsV1beta2.ScaleSpec{
|
||||
Replicas: *(ss.Spec.Replicas),
|
||||
},
|
||||
Status: appsV1beta2.ScaleStatus{
|
||||
Replicas: ss.Status.Replicas,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
var statefulPodRegex = regexp.MustCompile("(.*)-([0-9]+)$")
|
||||
|
||||
func getStatefulPodOrdinal(pod *v1.Pod) int {
|
||||
ordinal := -1
|
||||
subMatches := statefulPodRegex.FindStringSubmatch(pod.Name)
|
||||
if len(subMatches) < 3 {
|
||||
return ordinal
|
||||
}
|
||||
if i, err := strconv.ParseInt(subMatches[2], 10, 32); err == nil {
|
||||
ordinal = int(i)
|
||||
}
|
||||
return ordinal
|
||||
}
|
||||
|
||||
type statefulPodsByOrdinal []v1.Pod
|
||||
|
||||
func (sp statefulPodsByOrdinal) Len() int {
|
||||
return len(sp)
|
||||
}
|
||||
|
||||
func (sp statefulPodsByOrdinal) Swap(i, j int) {
|
||||
sp[i], sp[j] = sp[j], sp[i]
|
||||
}
|
||||
|
||||
func (sp statefulPodsByOrdinal) Less(i, j int) bool {
|
||||
return getStatefulPodOrdinal(&sp[i]) < getStatefulPodOrdinal(&sp[j])
|
||||
}
|
||||
|
||||
type updateStatefulSetFunc func(*apps.StatefulSet)
|
||||
|
||||
func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *apps.StatefulSet, err error) {
|
||||
statefulSets := c.AppsV1beta1().StatefulSets(namespace)
|
||||
var updateErr error
|
||||
pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
|
||||
if statefulSet, err = statefulSets.Get(name, metav1.GetOptions{}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Apply the update, then attempt to push it to the apiserver.
|
||||
applyUpdate(statefulSet)
|
||||
if statefulSet, err = statefulSets.Update(statefulSet); err == nil {
|
||||
Logf("Updating stateful set %s", name)
|
||||
return true, nil
|
||||
}
|
||||
updateErr = err
|
||||
return false, nil
|
||||
})
|
||||
if pollErr == wait.ErrWaitTimeout {
|
||||
pollErr = fmt.Errorf("couldn't apply the provided update to stateful set %q: %v", name, updateErr)
|
||||
}
|
||||
return statefulSet, pollErr
|
||||
}
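// Illustrative usage sketch (assumption, not part of the vendored file): bump a
// pod-template annotation with UpdateStatefulSetWithRetries so a conflicting
// concurrent write does not cause the update to be lost.
//
//	ss, err := UpdateStatefulSetWithRetries(c, ns, "web", func(update *apps.StatefulSet) {
//		if update.Spec.Template.Annotations == nil {
//			update.Spec.Template.Annotations = map[string]string{}
//		}
//		update.Spec.Template.Annotations["e2e.test/touched"] = "true"
//	})
//	ExpectNoError(err)
//	Logf("Updated stateful set %s/%s", ss.Namespace, ss.Name)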
|
357
vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go
generated
vendored
Normal file
@ -0,0 +1,357 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/spf13/viper"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
"k8s.io/kubernetes/pkg/kubemark"
|
||||
)
|
||||
|
||||
const defaultHost = "http://127.0.0.1:8080"
|
||||
|
||||
type TestContextType struct {
|
||||
KubeConfig string
|
||||
KubemarkExternalKubeConfig string
|
||||
KubeContext string
|
||||
KubeAPIContentType string
|
||||
KubeVolumeDir string
|
||||
CertDir string
|
||||
Host string
|
||||
// TODO: Deprecating this over time... instead just use gobindata_util.go , see #23987.
|
||||
RepoRoot string
|
||||
DockershimCheckpointDir string
|
||||
|
||||
Provider string
|
||||
CloudConfig CloudConfig
|
||||
KubectlPath string
|
||||
OutputDir string
|
||||
ReportDir string
|
||||
ReportPrefix string
|
||||
Prefix string
|
||||
MinStartupPods int
|
||||
// Timeout for waiting for system pods to be running
|
||||
SystemPodsStartupTimeout time.Duration
|
||||
UpgradeTarget string
|
||||
EtcdUpgradeStorage string
|
||||
EtcdUpgradeVersion string
|
||||
UpgradeImage string
|
||||
GCEUpgradeScript string
|
||||
ContainerRuntime string
|
||||
ContainerRuntimeEndpoint string
|
||||
// SystemdServices are comma separated list of systemd services the test framework
|
||||
// will dump logs for.
|
||||
SystemdServices string
|
||||
ImageServiceEndpoint string
|
||||
MasterOSDistro string
|
||||
NodeOSDistro string
|
||||
VerifyServiceAccount bool
|
||||
DeleteNamespace bool
|
||||
DeleteNamespaceOnFailure bool
|
||||
AllowedNotReadyNodes int
|
||||
CleanStart bool
|
||||
// If set to 'true' or 'all' framework will start a goroutine monitoring resource usage of system add-ons.
|
||||
// It will read the data every 30 seconds from all Nodes and print summary during afterEach. If set to 'master'
|
||||
// only master Node will be monitored.
|
||||
GatherKubeSystemResourceUsageData string
|
||||
GatherLogsSizes bool
|
||||
GatherMetricsAfterTest string
|
||||
GatherSuiteMetricsAfterTest bool
|
||||
// If set to 'true' framework will gather ClusterAutoscaler metrics when gathering them for other components.
|
||||
IncludeClusterAutoscalerMetrics bool
|
||||
// Currently supported values are 'hr' for human-readable and 'json'. It's a comma separated list.
|
||||
OutputPrintType string
|
||||
// NodeSchedulableTimeout is the timeout for waiting for all nodes to be schedulable.
|
||||
NodeSchedulableTimeout time.Duration
|
||||
// CreateTestingNS is responsible for creating namespace used for executing e2e tests.
|
||||
// It accepts namespace base name, which will be prepended with e2e prefix, kube client
|
||||
// and labels to be applied to a namespace.
|
||||
CreateTestingNS CreateTestingNSFn
|
||||
// If set to true test will dump data about the namespace in which test was running.
|
||||
DumpLogsOnFailure bool
|
||||
// Disables dumping cluster log from master and nodes after all tests.
|
||||
DisableLogDump bool
|
||||
// Path to the GCS artifacts directory to dump logs from nodes. Logexporter gets enabled if this is non-empty.
|
||||
LogexporterGCSPath string
|
||||
// If the garbage collector is enabled in the kube-apiserver and kube-controller-manager.
|
||||
GarbageCollectorEnabled bool
|
||||
// FeatureGates is a set of key=value pairs that describe feature gates for alpha/experimental features.
|
||||
FeatureGates string
|
||||
// Node e2e specific test context
|
||||
NodeTestContextType
|
||||
// Monitoring solution that is used in current cluster.
|
||||
ClusterMonitoringMode string
|
||||
|
||||
// Indicates what path the kubernetes-anywhere is installed on
|
||||
KubernetesAnywherePath string
|
||||
|
||||
// Viper-only parameters. These will in time replace all flags.
|
||||
|
||||
// Example: Create a file 'e2e.json' with the following:
|
||||
// "Cadvisor":{
|
||||
// "MaxRetries":"6"
|
||||
// }
|
||||
|
||||
Viper string
|
||||
Cadvisor struct {
|
||||
MaxRetries int
|
||||
SleepDurationMS int
|
||||
}
|
||||
|
||||
LoggingSoak struct {
|
||||
Scale int
|
||||
MilliSecondsBetweenWaves int
|
||||
}
|
||||
}
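// Illustrative example (assumption, not part of the vendored file): a local
// 'e2e.json' consumed through --viper-config=e2e could set the Viper-only
// parameters declared above, for instance:
//
//	{
//	  "Cadvisor":    {"MaxRetries": 6, "SleepDurationMS": 10000},
//	  "LoggingSoak": {"Scale": 1, "MilliSecondsBetweenWaves": 5000}
//	}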
|
||||
|
||||
// NodeTestContextType is part of TestContextType, it is shared by all node e2e test.
|
||||
type NodeTestContextType struct {
|
||||
// NodeE2E indicates whether it is running node e2e.
|
||||
NodeE2E bool
|
||||
// Name of the node to run tests on.
|
||||
NodeName string
|
||||
// NodeConformance indicates whether the test is running in node conformance mode.
|
||||
NodeConformance bool
|
||||
// PrepullImages indicates whether node e2e framework should prepull images.
|
||||
PrepullImages bool
|
||||
// KubeletConfig is the kubelet configuration the test is running against.
|
||||
KubeletConfig kubeletconfig.KubeletConfiguration
|
||||
// ImageDescription is the description of the image on which the test is running.
|
||||
ImageDescription string
|
||||
// SystemSpecName is the name of the system spec (e.g., gke) that's used in
|
||||
// the node e2e test. If empty, the default one (system.DefaultSpec) is
|
||||
// used. The system specs are in test/e2e_node/system/specs/.
|
||||
SystemSpecName string
|
||||
}
|
||||
|
||||
type CloudConfig struct {
|
||||
ApiEndpoint string
|
||||
ProjectID string
|
||||
Zone string // for multizone tests, arbitrarily chosen zone
|
||||
Region string
|
||||
MultiZone bool
|
||||
Cluster string
|
||||
MasterName string
|
||||
NodeInstanceGroup string // comma-delimited list of groups' names
|
||||
NumNodes int
|
||||
ClusterIPRange string
|
||||
ClusterTag string
|
||||
Network string
|
||||
ConfigFile string // for azure and openstack
|
||||
NodeTag string
|
||||
MasterTag string
|
||||
|
||||
Provider cloudprovider.Interface
|
||||
KubemarkController *kubemark.KubemarkController
|
||||
}
|
||||
|
||||
var TestContext TestContextType
|
||||
|
||||
// Register flags common to all e2e test suites.
|
||||
func RegisterCommonFlags() {
|
||||
// Turn on verbose by default to get spec names
|
||||
config.DefaultReporterConfig.Verbose = true
|
||||
|
||||
// Turn on EmitSpecProgress to get spec progress (especially on interrupt)
|
||||
config.GinkgoConfig.EmitSpecProgress = true
|
||||
|
||||
// Randomize specs as well as suites
|
||||
config.GinkgoConfig.RandomizeAllSpecs = true
|
||||
|
||||
flag.StringVar(&TestContext.GatherKubeSystemResourceUsageData, "gather-resource-usage", "false", "If set to 'true' or 'all' framework will be monitoring resource usage of all system add-ons in (some) e2e tests; if set to 'master' framework will be monitoring master node only; if set to 'none' or 'false' monitoring will be turned off.")
|
||||
flag.BoolVar(&TestContext.GatherLogsSizes, "gather-logs-sizes", false, "If set to true framework will be monitoring logs sizes on all machines running e2e tests.")
|
||||
flag.StringVar(&TestContext.GatherMetricsAfterTest, "gather-metrics-at-teardown", "false", "If set to 'true' framework will gather metrics from all components after each test. If set to 'master' only master component metrics would be gathered.")
|
||||
flag.BoolVar(&TestContext.GatherSuiteMetricsAfterTest, "gather-suite-metrics-at-teardown", false, "If set to true framework will gather metrics from all components after the whole test suite completes.")
|
||||
flag.BoolVar(&TestContext.IncludeClusterAutoscalerMetrics, "include-cluster-autoscaler", false, "If set to true, framework will include Cluster Autoscaler when gathering metrics.")
|
||||
flag.StringVar(&TestContext.OutputPrintType, "output-print-type", "json", "Format in which summaries should be printed: 'hr' for human readable, 'json' for JSON ones.")
|
||||
flag.BoolVar(&TestContext.DumpLogsOnFailure, "dump-logs-on-failure", true, "If set to true test will dump data about the namespace in which test was running.")
|
||||
flag.BoolVar(&TestContext.DisableLogDump, "disable-log-dump", false, "If set to true, logs from master and nodes won't be gathered after test run.")
|
||||
flag.StringVar(&TestContext.LogexporterGCSPath, "logexporter-gcs-path", "", "Path to the GCS artifacts directory to dump logs from nodes. Logexporter gets enabled if this is non-empty.")
|
||||
flag.BoolVar(&TestContext.DeleteNamespace, "delete-namespace", true, "If true tests will delete namespace after completion. It is only designed to make debugging easier, DO NOT turn it off by default.")
|
||||
flag.BoolVar(&TestContext.DeleteNamespaceOnFailure, "delete-namespace-on-failure", true, "If true, framework will delete test namespace on failure. Used only during test debugging.")
|
||||
flag.IntVar(&TestContext.AllowedNotReadyNodes, "allowed-not-ready-nodes", 0, "If non-zero, framework will allow for that many non-ready nodes when checking for all ready nodes.")
|
||||
|
||||
flag.StringVar(&TestContext.Host, "host", "", fmt.Sprintf("The host, or apiserver, to connect to. Will default to %s if this argument and --kubeconfig are not set", defaultHost))
|
||||
flag.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.")
|
||||
flag.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
|
||||
flag.StringVar(&TestContext.FeatureGates, "feature-gates", "", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
|
||||
flag.StringVar(&TestContext.Viper, "viper-config", "e2e", "The name of the viper config i.e. 'e2e' will read values from 'e2e.json' locally. All e2e parameters are meant to be configurable by viper.")
|
||||
flag.StringVar(&TestContext.ContainerRuntime, "container-runtime", "docker", "The container runtime of cluster VM instances (docker/rkt/remote).")
|
||||
flag.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "", "The container runtime endpoint of cluster VM instances.")
|
||||
flag.StringVar(&TestContext.SystemdServices, "systemd-services", "docker", "The comma separated list of systemd services the framework will dump logs for.")
|
||||
flag.StringVar(&TestContext.ImageServiceEndpoint, "image-service-endpoint", "", "The image service endpoint of cluster VM instances.")
|
||||
flag.StringVar(&TestContext.DockershimCheckpointDir, "dockershim-checkpoint-dir", "/var/lib/dockershim/sandbox", "The directory for dockershim to store sandbox checkpoints.")
|
||||
flag.StringVar(&TestContext.KubernetesAnywherePath, "kubernetes-anywhere-path", "/workspace/kubernetes-anywhere", "Which directory kubernetes-anywhere is installed to.")
|
||||
}
|
||||
|
||||
// Register flags specific to the cluster e2e test suite.
|
||||
func RegisterClusterFlags() {
|
||||
flag.BoolVar(&TestContext.VerifyServiceAccount, "e2e-verify-service-account", true, "If true tests will verify the service account before running.")
|
||||
flag.StringVar(&TestContext.KubeConfig, clientcmd.RecommendedConfigPathFlag, os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to kubeconfig containing embedded authinfo.")
|
||||
flag.StringVar(&TestContext.KubemarkExternalKubeConfig, fmt.Sprintf("%s-%s", "kubemark-external", clientcmd.RecommendedConfigPathFlag), "", "Path to kubeconfig containing embedded authinfo for external cluster.")
|
||||
flag.StringVar(&TestContext.KubeContext, clientcmd.FlagContext, "", "kubeconfig context to use/override. If unset, will use value from 'current-context'")
|
||||
flag.StringVar(&TestContext.KubeAPIContentType, "kube-api-content-type", "application/vnd.kubernetes.protobuf", "ContentType used to communicate with apiserver")
|
||||
|
||||
flag.StringVar(&TestContext.KubeVolumeDir, "volume-dir", "/var/lib/kubelet", "Path to the directory containing the kubelet volumes.")
|
||||
flag.StringVar(&TestContext.CertDir, "cert-dir", "", "Path to the directory containing the certs. Default is empty, which doesn't use certs.")
|
||||
flag.StringVar(&TestContext.RepoRoot, "repo-root", "../../", "Root directory of kubernetes repository, for finding test files.")
|
||||
flag.StringVar(&TestContext.Provider, "provider", "", "The name of the Kubernetes provider (gce, gke, local, vagrant, etc.)")
|
||||
flag.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")
|
||||
flag.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.")
|
||||
flag.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.")
|
||||
flag.StringVar(&TestContext.MasterOSDistro, "master-os-distro", "debian", "The OS distribution of cluster master (debian, trusty, or coreos).")
|
||||
flag.StringVar(&TestContext.NodeOSDistro, "node-os-distro", "debian", "The OS distribution of cluster VM instances (debian, trusty, or coreos).")
|
||||
flag.StringVar(&TestContext.ClusterMonitoringMode, "cluster-monitoring-mode", "influxdb", "The monitoring solution that is used in the cluster.")
|
||||
|
||||
// TODO: Flags per provider? Rename gce-project/gce-zone?
|
||||
cloudConfig := &TestContext.CloudConfig
|
||||
flag.StringVar(&cloudConfig.MasterName, "kube-master", "", "Name of the kubernetes master. Only required if provider is gce or gke")
|
||||
flag.StringVar(&cloudConfig.ApiEndpoint, "gce-api-endpoint", "", "The GCE ApiEndpoint being used, if applicable")
|
||||
flag.StringVar(&cloudConfig.ProjectID, "gce-project", "", "The GCE project being used, if applicable")
|
||||
flag.StringVar(&cloudConfig.Zone, "gce-zone", "", "GCE zone being used, if applicable")
|
||||
flag.StringVar(&cloudConfig.Region, "gce-region", "", "GCE region being used, if applicable")
|
||||
flag.BoolVar(&cloudConfig.MultiZone, "gce-multizone", false, "If true, start GCE cloud provider with multizone support.")
|
||||
flag.StringVar(&cloudConfig.Cluster, "gke-cluster", "", "GKE name of cluster being used, if applicable")
|
||||
flag.StringVar(&cloudConfig.NodeInstanceGroup, "node-instance-group", "", "Name of the managed instance group for nodes. Valid only for gce, gke or aws. If there is more than one group: comma separated list of groups.")
|
||||
flag.StringVar(&cloudConfig.Network, "network", "e2e", "The cloud provider network for this e2e cluster.")
|
||||
flag.IntVar(&cloudConfig.NumNodes, "num-nodes", -1, "Number of nodes in the cluster")
|
||||
flag.StringVar(&cloudConfig.ClusterIPRange, "cluster-ip-range", "10.64.0.0/14", "A CIDR notation IP range from which to assign IPs in the cluster.")
|
||||
flag.StringVar(&cloudConfig.NodeTag, "node-tag", "", "Network tags used on node instances. Valid only for gce, gke")
|
||||
flag.StringVar(&cloudConfig.MasterTag, "master-tag", "", "Network tags used on master instances. Valid only for gce, gke")
|
||||
|
||||
flag.StringVar(&cloudConfig.ClusterTag, "cluster-tag", "", "Tag used to identify resources. Only required if provider is aws.")
|
||||
flag.StringVar(&cloudConfig.ConfigFile, "cloud-config-file", "", "Cloud config file. Only required if provider is azure.")
|
||||
flag.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used.")
|
||||
flag.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
|
||||
flag.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 4*time.Hour, "Timeout for waiting for all nodes to be schedulable.")
|
||||
flag.StringVar(&TestContext.UpgradeTarget, "upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
|
||||
flag.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
|
||||
flag.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
|
||||
flag.StringVar(&TestContext.UpgradeImage, "upgrade-image", "", "Image to upgrade to (e.g. 'container_vm' or 'gci') if doing an upgrade test.")
|
||||
flag.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.")
|
||||
flag.BoolVar(&TestContext.CleanStart, "clean-start", false, "If true, purge all namespaces except default and system before running tests. This serves to Cleanup test namespaces from failed/interrupted e2e runs in a long-lived cluster.")
|
||||
flag.BoolVar(&TestContext.GarbageCollectorEnabled, "garbage-collector-enabled", true, "Set to true if the garbage collector is enabled in the kube-apiserver and kube-controller-manager, then some tests will rely on the garbage collector to delete dependent resources.")
|
||||
}
|
||||
|
||||
// Register flags specific to the node e2e test suite.
|
||||
func RegisterNodeFlags() {
|
||||
// Mark the test as node e2e when node flags are registered.
|
||||
TestContext.NodeE2E = true
|
||||
flag.StringVar(&TestContext.NodeName, "node-name", "", "Name of the node to run tests on.")
|
||||
// TODO(random-liu): Move kubelet start logic out of the test.
|
||||
// TODO(random-liu): Move log fetch logic out of the test.
|
||||
// There are different ways to start kubelet (systemd, initd, docker, rkt, manually started etc.)
|
||||
// and manage logs (journald, upstart etc.).
|
||||
// For different situation we need to mount different things into the container, run different commands.
|
||||
// It is hard and unnecessary to deal with the complexity inside the test suite.
|
||||
flag.BoolVar(&TestContext.NodeConformance, "conformance", false, "If true, the test suite will not start kubelet, and fetch system log (kernel, docker, kubelet log etc.) to the report directory.")
|
||||
flag.BoolVar(&TestContext.PrepullImages, "prepull-images", true, "If true, prepull images so image pull failures do not cause test failures.")
|
||||
flag.StringVar(&TestContext.ImageDescription, "image-description", "", "The description of the image which the test will be running on.")
|
||||
flag.StringVar(&TestContext.SystemSpecName, "system-spec-name", "", "The name of the system spec (e.g., gke) that's used in the node e2e test. The system specs are in test/e2e_node/system/specs/. This is used by the test framework to determine which tests to run for validating the system requirements.")
|
||||
}
|
||||
|
||||
// ViperizeFlags sets up all flag and config processing. Future configuration info should be added to viper, not to flags.
|
||||
func ViperizeFlags() {
|
||||
|
||||
// Part 1: Set regular flags.
|
||||
// TODO: Future, let's eliminate e2e 'flag' deps entirely in favor of viper only,
|
||||
// since go test 'flag's are sort of incompatible w/ flag, glog, etc.
|
||||
RegisterCommonFlags()
|
||||
RegisterClusterFlags()
|
||||
flag.Parse()
|
||||
|
||||
// Part 2: Set Viper provided flags.
|
||||
// This must be done after common flags are registered, since Viper is a flag option.
|
||||
viper.SetConfigName(TestContext.Viper)
|
||||
viper.AddConfigPath(".")
|
||||
viper.ReadInConfig()
|
||||
|
||||
// TODO Consider whether or not we want to use overwriteFlagsWithViperConfig().
|
||||
viper.Unmarshal(&TestContext)
|
||||
|
||||
AfterReadingAllFlags(&TestContext)
|
||||
}
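// Illustrative usage sketch (assumption, not part of the vendored file): a test
// suite's entry point typically calls ViperizeFlags once before running specs,
// for example from a TestMain:
//
//	func TestMain(m *testing.M) {
//		framework.ViperizeFlags()
//		os.Exit(m.Run())
//	}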
|
||||
|
||||
func createKubeConfig(clientCfg *restclient.Config) *clientcmdapi.Config {
|
||||
clusterNick := "cluster"
|
||||
userNick := "user"
|
||||
contextNick := "context"
|
||||
|
||||
config := clientcmdapi.NewConfig()
|
||||
|
||||
credentials := clientcmdapi.NewAuthInfo()
|
||||
credentials.Token = clientCfg.BearerToken
|
||||
credentials.ClientCertificate = clientCfg.TLSClientConfig.CertFile
|
||||
if len(credentials.ClientCertificate) == 0 {
|
||||
credentials.ClientCertificateData = clientCfg.TLSClientConfig.CertData
|
||||
}
|
||||
credentials.ClientKey = clientCfg.TLSClientConfig.KeyFile
|
||||
if len(credentials.ClientKey) == 0 {
|
||||
credentials.ClientKeyData = clientCfg.TLSClientConfig.KeyData
|
||||
}
|
||||
config.AuthInfos[userNick] = credentials
|
||||
|
||||
cluster := clientcmdapi.NewCluster()
|
||||
cluster.Server = clientCfg.Host
|
||||
cluster.CertificateAuthority = clientCfg.CAFile
|
||||
if len(cluster.CertificateAuthority) == 0 {
|
||||
cluster.CertificateAuthorityData = clientCfg.CAData
|
||||
}
|
||||
cluster.InsecureSkipTLSVerify = clientCfg.Insecure
|
||||
config.Clusters[clusterNick] = cluster
|
||||
|
||||
context := clientcmdapi.NewContext()
|
||||
context.Cluster = clusterNick
|
||||
context.AuthInfo = userNick
|
||||
config.Contexts[contextNick] = context
|
||||
config.CurrentContext = contextNick
|
||||
|
||||
return config
|
||||
}
|
||||
|
||||
// AfterReadingAllFlags makes changes to the context after all flags
|
||||
// have been read.
|
||||
func AfterReadingAllFlags(t *TestContextType) {
|
||||
// Only set a default host if one won't be supplied via kubeconfig
|
||||
if len(t.Host) == 0 && len(t.KubeConfig) == 0 {
|
||||
// Check if we can use the in-cluster config
|
||||
if clusterConfig, err := restclient.InClusterConfig(); err == nil {
|
||||
if tempFile, err := ioutil.TempFile(os.TempDir(), "kubeconfig-"); err == nil {
|
||||
kubeConfig := createKubeConfig(clusterConfig)
|
||||
clientcmd.WriteToFile(*kubeConfig, tempFile.Name())
|
||||
t.KubeConfig = tempFile.Name()
|
||||
glog.Infof("Using a temporary kubeconfig file from in-cluster config : %s", tempFile.Name())
|
||||
}
|
||||
}
|
||||
if len(t.KubeConfig) == 0 {
|
||||
glog.Warningf("Unable to find in-cluster config, using default host : %s", defaultHost)
|
||||
t.Host = defaultHost
|
||||
}
|
||||
}
|
||||
}
|
34
vendor/k8s.io/kubernetes/test/e2e/framework/timer/BUILD
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["timer.go"],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework/timer",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/perftype:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["timer_test.go"],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework/timer",
|
||||
library = ":go_default_library",
|
||||
deps = ["//vendor/github.com/onsi/gomega:go_default_library"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
126
vendor/k8s.io/kubernetes/test/e2e/framework/timer/timer.go
generated
vendored
Normal file
@ -0,0 +1,126 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package timer
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/perftype"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var now = time.Now
|
||||
|
||||
// Represents a phase of a test. Phases can overlap.
|
||||
type Phase struct {
|
||||
sequenceNumber int
|
||||
name string
|
||||
startTime time.Time
|
||||
endTime time.Time
|
||||
}
|
||||
|
||||
func (phase *Phase) ended() bool {
|
||||
return !phase.endTime.IsZero()
|
||||
}
|
||||
|
||||
// End marks the phase as ended, unless it had already been ended before.
|
||||
func (phase *Phase) End() {
|
||||
if !phase.ended() {
|
||||
phase.endTime = now()
|
||||
}
|
||||
}
|
||||
|
||||
func (phase *Phase) label() string {
|
||||
return fmt.Sprintf("%03d-%s", phase.sequenceNumber, phase.name)
|
||||
}
|
||||
|
||||
func (phase *Phase) duration() time.Duration {
|
||||
endTime := phase.endTime
|
||||
if !phase.ended() {
|
||||
endTime = now()
|
||||
}
|
||||
return endTime.Sub(phase.startTime)
|
||||
}
|
||||
|
||||
func (phase *Phase) humanReadable() string {
|
||||
if phase.ended() {
|
||||
return fmt.Sprintf("Phase %s: %v\n", phase.label(), phase.duration())
|
||||
} else {
|
||||
return fmt.Sprintf("Phase %s: %v so far\n", phase.label(), phase.duration())
|
||||
}
|
||||
}
|
||||
|
||||
// A TestPhaseTimer groups phases and provides a way to export their measurements as JSON or human-readable text.
|
||||
// It is safe to use concurrently.
|
||||
type TestPhaseTimer struct {
|
||||
lock sync.Mutex
|
||||
phases []*Phase
|
||||
}
|
||||
|
||||
// NewTestPhaseTimer creates a new TestPhaseTimer.
|
||||
func NewTestPhaseTimer() *TestPhaseTimer {
|
||||
return &TestPhaseTimer{}
|
||||
}
|
||||
|
||||
// StartPhase starts a new phase.
|
||||
// sequenceNumber is an integer prepended to phaseName in the output, such that lexicographic sorting
|
||||
// of phases in perfdash reconstructs the order of execution. Unfortunately it needs to be
|
||||
// provided manually, since a simple incrementing counter would have the effect that inserting
|
||||
// a new phase would renumber subsequent phases, breaking the continuity of historical records.
|
||||
func (timer *TestPhaseTimer) StartPhase(sequenceNumber int, phaseName string) *Phase {
|
||||
timer.lock.Lock()
|
||||
defer timer.lock.Unlock()
|
||||
newPhase := &Phase{sequenceNumber: sequenceNumber, name: phaseName, startTime: now()}
|
||||
timer.phases = append(timer.phases, newPhase)
|
||||
return newPhase
|
||||
}
|
||||
|
||||
func (timer *TestPhaseTimer) SummaryKind() string {
|
||||
return "TestPhaseTimer"
|
||||
}
|
||||
|
||||
func (timer *TestPhaseTimer) PrintHumanReadable() string {
|
||||
buf := bytes.Buffer{}
|
||||
timer.lock.Lock()
|
||||
defer timer.lock.Unlock()
|
||||
for _, phase := range timer.phases {
|
||||
buf.WriteString(phase.humanReadable())
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (timer *TestPhaseTimer) PrintJSON() string {
|
||||
data := perftype.PerfData{
|
||||
Version: "v1",
|
||||
DataItems: []perftype.DataItem{{
|
||||
Unit: "s",
|
||||
Labels: map[string]string{"test": "phases"},
|
||||
Data: make(map[string]float64)}}}
|
||||
timer.lock.Lock()
|
||||
defer timer.lock.Unlock()
|
||||
for _, phase := range timer.phases {
|
||||
data.DataItems[0].Data[phase.label()] = phase.duration().Seconds()
|
||||
if !phase.ended() {
|
||||
data.DataItems[0].Labels["ended"] = "false"
|
||||
}
|
||||
}
|
||||
return framework.PrettyPrintJSON(data)
|
||||
}
|
92
vendor/k8s.io/kubernetes/test/e2e/framework/timer/timer_test.go
generated
vendored
Normal file
@ -0,0 +1,92 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package timer
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var currentTime time.Time
|
||||
|
||||
func init() {
|
||||
setCurrentTimeSinceEpoch(0)
|
||||
now = func() time.Time { return currentTime }
|
||||
}
|
||||
|
||||
func setCurrentTimeSinceEpoch(duration time.Duration) {
|
||||
currentTime = time.Unix(0, duration.Nanoseconds())
|
||||
}
|
||||
|
||||
func testUsageWithDefer(timer *TestPhaseTimer) {
|
||||
defer timer.StartPhase(33, "two").End()
|
||||
setCurrentTimeSinceEpoch(6*time.Second + 500*time.Millisecond)
|
||||
}
|
||||
|
||||
func TestTimer(t *testing.T) {
|
||||
RegisterTestingT(t)
|
||||
|
||||
timer := NewTestPhaseTimer()
|
||||
setCurrentTimeSinceEpoch(1 * time.Second)
|
||||
phaseOne := timer.StartPhase(1, "one")
|
||||
setCurrentTimeSinceEpoch(3 * time.Second)
|
||||
testUsageWithDefer(timer)
|
||||
|
||||
Expect(timer.PrintJSON()).To(MatchJSON(`{
|
||||
"version": "v1",
|
||||
"dataItems": [
|
||||
{
|
||||
"data": {
|
||||
"001-one": 5.5,
|
||||
"033-two": 3.5
|
||||
},
|
||||
"unit": "s",
|
||||
"labels": {
|
||||
"test": "phases",
|
||||
"ended": "false"
|
||||
}
|
||||
}
|
||||
]
|
||||
}`))
|
||||
Expect(timer.PrintHumanReadable()).To(Equal(`Phase 001-one: 5.5s so far
|
||||
Phase 033-two: 3.5s
|
||||
`))
|
||||
|
||||
setCurrentTimeSinceEpoch(7*time.Second + 500*time.Millisecond)
|
||||
phaseOne.End()
|
||||
|
||||
Expect(timer.PrintJSON()).To(MatchJSON(`{
|
||||
"version": "v1",
|
||||
"dataItems": [
|
||||
{
|
||||
"data": {
|
||||
"001-one": 6.5,
|
||||
"033-two": 3.5
|
||||
},
|
||||
"unit": "s",
|
||||
"labels": {
|
||||
"test": "phases"
|
||||
}
|
||||
}
|
||||
]
|
||||
}`))
|
||||
Expect(timer.PrintHumanReadable()).To(Equal(`Phase 001-one: 6.5s
|
||||
Phase 033-two: 3.5s
|
||||
`))
|
||||
}
|
105
vendor/k8s.io/kubernetes/test/e2e/framework/upgrade_util.go
generated
vendored
Normal file
@ -0,0 +1,105 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/version"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
// RealVersion turns a version constant into a version string deployable on
|
||||
// GKE. See hack/get-build.sh for more information.
|
||||
func RealVersion(s string) (string, error) {
|
||||
Logf("Getting real version for %q", s)
|
||||
v, _, err := RunCmd(path.Join(TestContext.RepoRoot, "hack/get-build.sh"), "-v", s)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
Logf("Version for %q is %q", s, v)
|
||||
return strings.TrimPrefix(strings.TrimSpace(v), "v"), nil
|
||||
}
|
||||
|
||||
func traceRouteToMaster() {
|
||||
path, err := exec.LookPath("traceroute")
|
||||
if err != nil {
|
||||
Logf("Could not find traceroute program")
|
||||
return
|
||||
}
|
||||
|
||||
cmd := exec.Command(path, "-I", GetMasterHost())
|
||||
out, err := cmd.Output()
|
||||
if len(out) != 0 {
|
||||
Logf(string(out))
|
||||
}
|
||||
if exiterr, ok := err.(*exec.ExitError); err != nil && ok {
|
||||
Logf("error while running traceroute: %s", exiterr.Stderr)
|
||||
}
|
||||
}
|
||||
|
||||
func CheckMasterVersion(c clientset.Interface, want string) error {
|
||||
Logf("Checking master version")
|
||||
var err error
|
||||
var v *version.Info
|
||||
waitErr := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
|
||||
v, err = c.Discovery().ServerVersion()
|
||||
if err != nil {
|
||||
traceRouteToMaster()
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if waitErr != nil {
|
||||
return fmt.Errorf("CheckMasterVersion() couldn't get the master version: %v", err)
|
||||
}
|
||||
// We do prefix trimming and then matching because:
|
||||
// want looks like: 0.19.3-815-g50e67d4
|
||||
// got looks like: v0.19.3-815-g50e67d4034e858-dirty
|
||||
got := strings.TrimPrefix(v.GitVersion, "v")
|
||||
if !strings.HasPrefix(got, want) {
|
||||
return fmt.Errorf("master had kube-apiserver version %s which does not start with %s",
|
||||
got, want)
|
||||
}
|
||||
Logf("Master is at version %s", want)
|
||||
return nil
|
||||
}
|
||||
|
||||
func CheckNodesVersions(cs clientset.Interface, want string) error {
|
||||
l := GetReadySchedulableNodesOrDie(cs)
|
||||
for _, n := range l.Items {
|
||||
// We do prefix trimming and then matching because:
|
||||
// want looks like: 0.19.3-815-g50e67d4
|
||||
// kv/kvp look like: v0.19.3-815-g50e67d4034e858-dirty
|
||||
kv, kpv := strings.TrimPrefix(n.Status.NodeInfo.KubeletVersion, "v"),
|
||||
strings.TrimPrefix(n.Status.NodeInfo.KubeProxyVersion, "v")
|
||||
if !strings.HasPrefix(kv, want) {
|
||||
return fmt.Errorf("node %s had kubelet version %s which does not start with %s",
|
||||
n.ObjectMeta.Name, kv, want)
|
||||
}
|
||||
if !strings.HasPrefix(kpv, want) {
|
||||
return fmt.Errorf("node %s had kube-proxy version %s which does not start with %s",
|
||||
n.ObjectMeta.Name, kpv, want)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
5127
vendor/k8s.io/kubernetes/test/e2e/framework/util.go
generated
vendored
Normal file
File diff suppressed because it is too large
508
vendor/k8s.io/kubernetes/test/e2e/framework/volume_util.go
generated
vendored
Normal file
@ -0,0 +1,508 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
* This test checks that various VolumeSources are working.
|
||||
*
|
||||
 * There are two ways to test the volumes:
|
||||
* 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
|
||||
 * The test creates a server pod, exporting a simple 'index.html' file.
|
||||
* Then it uses appropriate VolumeSource to import this file into a client pod
|
||||
* and checks that the pod can see the file. It does so by importing the file
|
||||
 * into the web server root and loading the index.html from it.
|
||||
*
|
||||
 * These tests work only when privileged containers are allowed; exporting
|
||||
* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
|
||||
* other privileged magic in the server pod.
|
||||
*
|
||||
* Note that the server containers are for testing purposes only and should not
|
||||
* be used in production.
|
||||
*
|
||||
* 2) With server outside of Kubernetes (Cinder, ...)
|
||||
* Appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
|
||||
* the tested Kubernetes cluster. The test itself creates a new volume,
|
||||
 * and checks that Kubernetes can use it as a volume.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
"github.com/golang/glog"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// Current supported images for e2e volume testing to be assigned to VolumeTestConfig.serverImage
|
||||
const (
|
||||
NfsServerImage string = "gcr.io/google_containers/volume-nfs:0.8"
|
||||
IscsiServerImage string = "gcr.io/google_containers/volume-iscsi:0.1"
|
||||
GlusterfsServerImage string = "gcr.io/google_containers/volume-gluster:0.2"
|
||||
CephServerImage string = "gcr.io/google_containers/volume-ceph:0.1"
|
||||
RbdServerImage string = "gcr.io/google_containers/volume-rbd:0.1"
|
||||
)
|
||||
|
||||
const (
|
||||
Kb int64 = 1000
|
||||
Mb int64 = 1000 * Kb
|
||||
Gb int64 = 1000 * Mb
|
||||
Tb int64 = 1000 * Gb
|
||||
KiB int64 = 1024
|
||||
MiB int64 = 1024 * KiB
|
||||
GiB int64 = 1024 * MiB
|
||||
TiB int64 = 1024 * GiB
|
||||
)
|
||||
|
||||
// Configuration of one test. The test consists of:
|
||||
// - server pod - runs serverImage, exports ports[]
|
||||
// - client pod - does not need any special configuration
|
||||
type VolumeTestConfig struct {
|
||||
Namespace string
|
||||
// Prefix of all pods. Typically the test name.
|
||||
Prefix string
|
||||
// Name of container image for the server pod.
|
||||
ServerImage string
|
||||
// Ports to export from the server pod. TCP only.
|
||||
ServerPorts []int
|
||||
// Commands to run in the container image.
|
||||
ServerCmds []string
|
||||
// Arguments to pass to the container image.
|
||||
ServerArgs []string
|
||||
// Volumes needed to be mounted to the server container from the host
|
||||
// map <host (source) path> -> <container (dst.) path>
|
||||
ServerVolumes map[string]string
|
||||
// Wait for the pod to terminate successfully
|
||||
// False indicates that the pod is long running
|
||||
WaitForCompletion bool
|
||||
// ServerNodeName is the spec.nodeName to run server pod on. Default is any node.
|
||||
ServerNodeName string
|
||||
// ClientNodeName is the spec.nodeName to run client pod on. Default is any node.
|
||||
ClientNodeName string
|
||||
// NodeSelector to use in pod spec (server, client and injector pods).
|
||||
NodeSelector map[string]string
|
||||
}
|
||||
|
||||
// VolumeTest contains a volume to mount into a client pod and its
|
||||
// expected content.
|
||||
type VolumeTest struct {
|
||||
Volume v1.VolumeSource
|
||||
File string
|
||||
ExpectedContent string
|
||||
}
|
||||
|
||||
// NFS-specific wrapper for CreateStorageServer.
|
||||
func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
|
||||
config = VolumeTestConfig{
|
||||
Namespace: namespace,
|
||||
Prefix: "nfs",
|
||||
ServerImage: NfsServerImage,
|
||||
ServerPorts: []int{2049},
|
||||
}
|
||||
if len(args) > 0 {
|
||||
config.ServerArgs = args
|
||||
}
|
||||
pod, ip = CreateStorageServer(cs, config)
|
||||
return config, pod, ip
|
||||
}
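// Illustrative usage sketch (assumption, not part of the vendored file; the file
// name, expected content, and the framework instance f are hypothetical): start
// the NFS server, then mount the export in a client pod and verify its content
// with TestVolumeClient.
//
//	config, _, serverIP := NewNFSServer(cs, ns, []string{})
//	defer VolumeTestCleanup(f, config)
//	tests := []VolumeTest{{
//		Volume: v1.VolumeSource{
//			NFS: &v1.NFSVolumeSource{Server: serverIP, Path: "/", ReadOnly: true},
//		},
//		File:            "index.html",
//		ExpectedContent: "Hello from NFS!",
//	}}
//	TestVolumeClient(cs, config, nil, tests)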
|
||||
|
||||
// GlusterFS-specific wrapper for CreateStorageServer. Also creates the gluster endpoints object.
|
||||
func NewGlusterfsServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
|
||||
config = VolumeTestConfig{
|
||||
Namespace: namespace,
|
||||
Prefix: "gluster",
|
||||
ServerImage: GlusterfsServerImage,
|
||||
ServerPorts: []int{24007, 24008, 49152},
|
||||
}
|
||||
pod, ip = CreateStorageServer(cs, config)
|
||||
|
||||
By("creating Gluster endpoints")
|
||||
endpoints := &v1.Endpoints{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Endpoints",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: config.Prefix + "-server",
|
||||
},
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{
|
||||
Addresses: []v1.EndpointAddress{
|
||||
{
|
||||
IP: ip,
|
||||
},
|
||||
},
|
||||
Ports: []v1.EndpointPort{
|
||||
{
|
||||
Name: "gluster",
|
||||
Port: 24007,
|
||||
Protocol: v1.ProtocolTCP,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
endpoints, err := cs.CoreV1().Endpoints(namespace).Create(endpoints)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create endpoints for Gluster server")
|
||||
|
||||
return config, pod, ip
|
||||
}
|
||||
|
||||
// iSCSI-specific wrapper for CreateStorageServer.
|
||||
func NewISCSIServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
|
||||
config = VolumeTestConfig{
|
||||
Namespace: namespace,
|
||||
Prefix: "iscsi",
|
||||
ServerImage: IscsiServerImage,
|
||||
ServerPorts: []int{3260},
|
||||
ServerVolumes: map[string]string{
|
||||
// iSCSI container needs to insert modules from the host
|
||||
"/lib/modules": "/lib/modules",
|
||||
},
|
||||
}
|
||||
pod, ip = CreateStorageServer(cs, config)
|
||||
return config, pod, ip
|
||||
}
|
||||
|
||||
// CephRBD-specific wrapper for CreateStorageServer.
|
||||
func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
|
||||
config = VolumeTestConfig{
|
||||
Namespace: namespace,
|
||||
Prefix: "rbd",
|
||||
ServerImage: RbdServerImage,
|
||||
ServerPorts: []int{6789},
|
||||
ServerVolumes: map[string]string{
|
||||
"/lib/modules": "/lib/modules",
|
||||
},
|
||||
}
|
||||
pod, ip = CreateStorageServer(cs, config)
|
||||
return config, pod, ip
|
||||
}
|
||||
|
||||
// Wrapper for StartVolumeServer(). A storage server config is passed in, and a pod pointer
|
||||
// and ip address string are returned.
|
||||
// Note: Expect() is called so no error is returned.
|
||||
func CreateStorageServer(cs clientset.Interface, config VolumeTestConfig) (pod *v1.Pod, ip string) {
|
||||
pod = StartVolumeServer(cs, config)
|
||||
Expect(pod).NotTo(BeNil(), "storage server pod should not be nil")
|
||||
ip = pod.Status.PodIP
|
||||
Expect(len(ip)).NotTo(BeZero(), fmt.Sprintf("pod %s's IP should not be empty", pod.Name))
|
||||
Logf("%s server pod IP address: %s", config.Prefix, ip)
|
||||
return pod, ip
|
||||
}
|
||||
|
||||
// StartVolumeServer starts a container specified by config.ServerImage and exports all
// config.ServerPorts from it. The returned pod should be used to get the server
// IP address and create the appropriate VolumeSource.
func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.Pod {
	podClient := client.CoreV1().Pods(config.Namespace)

	portCount := len(config.ServerPorts)
	serverPodPorts := make([]v1.ContainerPort, portCount)

	for i := 0; i < portCount; i++ {
		portName := fmt.Sprintf("%s-%d", config.Prefix, i)

		serverPodPorts[i] = v1.ContainerPort{
			Name:          portName,
			ContainerPort: int32(config.ServerPorts[i]),
			Protocol:      v1.ProtocolTCP,
		}
	}

	volumeCount := len(config.ServerVolumes)
	volumes := make([]v1.Volume, volumeCount)
	mounts := make([]v1.VolumeMount, volumeCount)

	i := 0
	for src, dst := range config.ServerVolumes {
		mountName := fmt.Sprintf("path%d", i)
		volumes[i].Name = mountName
		volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
			Path: src,
		}

		mounts[i].Name = mountName
		mounts[i].ReadOnly = false
		mounts[i].MountPath = dst

		i++
	}

	serverPodName := fmt.Sprintf("%s-server", config.Prefix)
	By(fmt.Sprint("creating ", serverPodName, " pod"))
	privileged := new(bool)
	*privileged = true

	restartPolicy := v1.RestartPolicyAlways
	if config.WaitForCompletion {
		restartPolicy = v1.RestartPolicyNever
	}
	serverPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: serverPodName,
			Labels: map[string]string{
				"role": serverPodName,
			},
		},

		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  serverPodName,
					Image: config.ServerImage,
					SecurityContext: &v1.SecurityContext{
						Privileged: privileged,
					},
					Command:      config.ServerCmds,
					Args:         config.ServerArgs,
					Ports:        serverPodPorts,
					VolumeMounts: mounts,
				},
			},
			Volumes:       volumes,
			RestartPolicy: restartPolicy,
			NodeName:      config.ServerNodeName,
			NodeSelector:  config.NodeSelector,
		},
	}

	var pod *v1.Pod
	serverPod, err := podClient.Create(serverPod)
	// ok if the server pod already exists. TODO: make this controllable by callers
	if err != nil {
		if apierrs.IsAlreadyExists(err) {
			Logf("Ignore \"already-exists\" error, re-get pod...")
			By(fmt.Sprintf("re-getting the %q server pod", serverPodName))
			serverPod, err = podClient.Get(serverPodName, metav1.GetOptions{})
			ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err)
			pod = serverPod
		} else {
			ExpectNoError(err, "Failed to create %q pod: %v", serverPodName, err)
		}
	}
	if config.WaitForCompletion {
		ExpectNoError(WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace))
		ExpectNoError(podClient.Delete(serverPod.Name, nil))
	} else {
		ExpectNoError(WaitForPodRunningInNamespace(client, serverPod))
		if pod == nil {
			By(fmt.Sprintf("locating the %q server pod", serverPodName))
			pod, err = podClient.Get(serverPodName, metav1.GetOptions{})
			ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
		}
	}
	return pod
}
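// exampleStartNFSServer is an illustrative sketch, not part of the upstream
// framework: it shows how a test might wire a VolumeTestConfig into
// StartVolumeServer for an NFS-style server. The image name and port below are
// assumptions for illustration only; real tests supply their own constants.
func exampleStartNFSServer(cs clientset.Interface, namespace string) *v1.Pod {
	config := VolumeTestConfig{
		Namespace:   namespace,
		Prefix:      "nfs",
		ServerImage: "k8s.gcr.io/volume-nfs:0.8", // assumed image reference
		ServerPorts: []int{2049},                 // NFS port
	}
	// StartVolumeServer creates the server pod (or re-uses an existing one)
	// and waits for it to be running.
	return StartVolumeServer(cs, config)
}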
// VolumeTestCleanup cleans both the server and the client pods.
func VolumeTestCleanup(f *Framework, config VolumeTestConfig) {
	By(fmt.Sprint("cleaning the environment after ", config.Prefix))

	defer GinkgoRecover()

	client := f.ClientSet
	podClient := client.CoreV1().Pods(config.Namespace)

	err := podClient.Delete(config.Prefix+"-client", nil)
	if err != nil {
		// Log the error before failing the test: if the test has already failed,
		// framework.ExpectNoError() won't print anything to the logs!
		glog.Warningf("Failed to delete client pod: %v", err)
		ExpectNoError(err, "Failed to delete client pod: %v", err)
	}

	if config.ServerImage != "" {
		if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) {
			ExpectNoError(err, "Failed to wait for the client pod to terminate: %v", err)
		}
		// See issue #24100.
		// Prevent umount errors by making sure the client pod exits cleanly *before* the volume server pod exits.
		By("sleeping a bit so client can stop and unmount")
		time.Sleep(20 * time.Second)

		err = podClient.Delete(config.Prefix+"-server", nil)
		if err != nil {
			glog.Warningf("Failed to delete server pod: %v", err)
			ExpectNoError(err, "Failed to delete server pod: %v", err)
		}
	}
}
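// exampleDeferredCleanup is an illustrative sketch, not part of the upstream
// framework: it shows the usual pattern of deferring VolumeTestCleanup right
// after the config is built, so both the client and the server pod are removed
// even when the test fails midway.
func exampleDeferredCleanup(f *Framework, config VolumeTestConfig) {
	defer VolumeTestCleanup(f, config)
	// ... start the server pod, inject content, run TestVolumeClient, etc.
}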
// TestVolumeClient starts a client pod using a given VolumeSource (exported by StartVolumeServer())
// and checks that the pod sees the expected data, e.g. from the server pod.
// Multiple VolumeTests can be specified to mount multiple volumes to a single
// pod.
func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGroup *int64, tests []VolumeTest) {
	By(fmt.Sprint("starting ", config.Prefix, " client"))
	var gracePeriod int64 = 1
	clientPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: config.Prefix + "-client",
			Labels: map[string]string{
				"role": config.Prefix + "-client",
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:       config.Prefix + "-client",
					Image:      BusyBoxImage,
					WorkingDir: "/opt",
					// An imperative and easily debuggable container which reads volume contents for
					// us to scan in the tests or by eye.
					// We expect that /opt is empty in the minimal containers which we use in this test.
					Command: []string{
						"/bin/sh",
						"-c",
						"while true ; do cat /opt/0/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ",
					},
					VolumeMounts: []v1.VolumeMount{},
				},
			},
			TerminationGracePeriodSeconds: &gracePeriod,
			SecurityContext: &v1.PodSecurityContext{
				SELinuxOptions: &v1.SELinuxOptions{
					Level: "s0:c0,c1",
				},
			},
			Volumes:      []v1.Volume{},
			NodeName:     config.ClientNodeName,
			NodeSelector: config.NodeSelector,
		},
	}
	podsNamespacer := client.CoreV1().Pods(config.Namespace)

	if fsGroup != nil {
		clientPod.Spec.SecurityContext.FSGroup = fsGroup
	}

	for i, test := range tests {
		volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)
		clientPod.Spec.Containers[0].VolumeMounts = append(clientPod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
			Name:      volumeName,
			MountPath: fmt.Sprintf("/opt/%d", i),
		})
		clientPod.Spec.Volumes = append(clientPod.Spec.Volumes, v1.Volume{
			Name:         volumeName,
			VolumeSource: test.Volume,
		})
	}
	clientPod, err := podsNamespacer.Create(clientPod)
	if err != nil {
		Failf("Failed to create %s pod: %v", clientPod.Name, err)
	}
	ExpectNoError(WaitForPodRunningInNamespace(client, clientPod))

	By("Checking that text file contents are perfect.")
	for i, test := range tests {
		fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
		_, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"cat", fileName}, test.ExpectedContent, time.Minute)
		Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file %s.", fileName)
	}

	if fsGroup != nil {
		By("Checking fsGroup is correct.")
		_, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"ls", "-ld", "/opt/0"}, strconv.Itoa(int(*fsGroup)), time.Minute)
		Expect(err).NotTo(HaveOccurred(), "failed: getting the right privileges in the file %v", int(*fsGroup))
	}
}
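// exampleNFSVolumeClient is an illustrative sketch, not part of the upstream
// framework: it mounts an NFS export from a previously started server pod and
// asserts the expected file content via TestVolumeClient. The export path,
// file name, content, and fsGroup are assumptions for illustration only.
func exampleNFSVolumeClient(cs clientset.Interface, config VolumeTestConfig, serverIP string) {
	tests := []VolumeTest{
		{
			Volume: v1.VolumeSource{
				NFS: &v1.NFSVolumeSource{
					Server:   serverIP,
					Path:     "/", // assumed export path
					ReadOnly: true,
				},
			},
			File:            "index.html",
			ExpectedContent: "Hello from NFS!",
		},
	}
	fsGroup := int64(1234) // assumed fsGroup; pass nil to skip the fsGroup check
	TestVolumeClient(cs, config, &fsGroup, tests)
}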
// InjectHtml inserts index.html with the given content into the given volume. It does so by
// starting an auxiliary pod which writes the file there.
// The volume must be writable.
func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, content string) {
	By(fmt.Sprint("starting ", config.Prefix, " injector"))
	podClient := client.CoreV1().Pods(config.Namespace)

	injectPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: config.Prefix + "-injector",
			Labels: map[string]string{
				"role": config.Prefix + "-injector",
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    config.Prefix + "-injector",
					Image:   BusyBoxImage,
					Command: []string{"/bin/sh"},
					Args:    []string{"-c", "echo '" + content + "' > /mnt/index.html && chmod o+rX /mnt /mnt/index.html"},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      config.Prefix + "-volume",
							MountPath: "/mnt",
						},
					},
				},
			},
			SecurityContext: &v1.PodSecurityContext{
				SELinuxOptions: &v1.SELinuxOptions{
					Level: "s0:c0,c1",
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
			Volumes: []v1.Volume{
				{
					Name:         config.Prefix + "-volume",
					VolumeSource: volume,
				},
			},
			NodeSelector: config.NodeSelector,
		},
	}

	defer func() {
		podClient.Delete(config.Prefix+"-injector", nil)
	}()

	injectPod, err := podClient.Create(injectPod)
	ExpectNoError(err, "Failed to create injector pod: %v", err)
	err = WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace)
	Expect(err).NotTo(HaveOccurred())
}
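// exampleInjectHtml is an illustrative sketch, not part of the upstream
// framework: it seeds index.html on a writable NFS export so that a later
// TestVolumeClient call can verify the content. The server IP, export path,
// and content are assumptions for illustration only.
func exampleInjectHtml(cs clientset.Interface, config VolumeTestConfig, serverIP string) {
	volume := v1.VolumeSource{
		NFS: &v1.NFSVolumeSource{
			Server: serverIP,
			Path:   "/", // assumed export path; must be writable
		},
	}
	InjectHtml(cs, config, volume, "Hello from NFS!")
}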
// CreateGCEVolume creates a GCE PD and returns its PersistentVolumeSource together with the disk name.
func CreateGCEVolume() (*v1.PersistentVolumeSource, string) {
	diskName, err := CreatePDWithRetry()
	ExpectNoError(err)
	return &v1.PersistentVolumeSource{
		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
			PDName:   diskName,
			FSType:   "ext3",
			ReadOnly: false,
		},
	}, diskName
}
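// exampleCreateGCEVolume is an illustrative sketch, not part of the upstream
// framework: it shows where the values returned by CreateGCEVolume would
// typically go, e.g. into a PersistentVolume spec and into later cleanup logic
// keyed by the disk name.
func exampleCreateGCEVolume() {
	pvSource, diskName := CreateGCEVolume()
	Logf("provisioned GCE PD %q with source %+v", diskName, pvSource.GCEPersistentDisk)
}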