vendor updates

Serguei Bezverkhi
2018-03-06 17:33:18 -05:00
parent 4b3ebc171b
commit e9033989a0
5854 changed files with 248382 additions and 119809 deletions


@ -3,6 +3,7 @@ package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
@ -10,6 +11,7 @@ go_library(
srcs = [
"authorizer_util.go",
"cleanup.go",
"crd_util.go",
"deployment_util.go",
"exec_util.go",
"firewall_util.go",
@ -26,6 +28,7 @@ go_library(
"nodes_util.go",
"perf_util.go",
"pods.go",
"profile_gatherer.go",
"psp_util.go",
"pv_util.go",
"rc_util.go",
@ -47,7 +50,6 @@ go_library(
"//pkg/apis/apps:go_default_library",
"//pkg/apis/batch:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/conditions:go_default_library",
@ -57,9 +59,10 @@ go_library(
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/node:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubectl:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
"//pkg/kubelet/dockershim/metrics:go_default_library",
@ -69,15 +72,15 @@ go_library(
"//pkg/kubelet/util/format:go_default_library",
"//pkg/kubemark:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/schedulercache:go_default_library",
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
"//pkg/ssh:go_default_library",
"//pkg/util/file:go_default_library",
"//pkg/util/system:go_default_library",
"//pkg/util/taints:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
"//plugin/pkg/scheduler/schedulercache:go_default_library",
"//pkg/volume/util:go_default_library",
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/manifest:go_default_library",
@ -101,7 +104,7 @@ go_library(
"//vendor/golang.org/x/net/websocket:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/google.golang.org/api/googleapi:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/api/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
@ -109,7 +112,11 @@ go_library(
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
@ -131,14 +138,18 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/discovery/cached:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/scale:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//vendor/k8s.io/client-go/tools/remotecommand:go_default_library",
@ -165,3 +176,9 @@ filegroup(
],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["firewall_util_test.go"],
embed = [":go_default_library"],
)

vendor/k8s.io/kubernetes/test/e2e/framework/crd_util.go (new file, 126 lines, generated/vendored)

@ -0,0 +1,126 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/dynamic"
)
// CleanCrdFn declares the clean up function needed to remove the CRD
type CleanCrdFn func() error
// TestCrd holds all the pieces needed to test with the CRD
type TestCrd struct {
Name string
Kind string
ApiGroup string
ApiVersion string
ApiExtensionClient *crdclientset.Clientset
Crd *apiextensionsv1beta1.CustomResourceDefinition
DynamicClient dynamic.ResourceInterface
CleanUp CleanCrdFn
}
// CreateTestCRD creates a new CRD specifically for the calling test.
func CreateTestCRD(f *Framework) (*TestCrd, error) {
suffix := randomSuffix()
name := fmt.Sprintf("e2e-test-%s-%s-crd", f.BaseName, suffix)
kind := fmt.Sprintf("E2e-test-%s-%s-crd", f.BaseName, suffix)
group := fmt.Sprintf("%s-crd-test.k8s.io", f.BaseName)
apiVersion := "v1"
testcrd := &TestCrd{
Name: name,
Kind: kind,
ApiGroup: group,
ApiVersion: apiVersion,
}
// Creating a custom resource definition for use by assorted tests.
config, err := LoadConfig()
if err != nil {
Failf("failed to load config: %v", err)
return nil, err
}
apiExtensionClient, err := crdclientset.NewForConfig(config)
if err != nil {
Failf("failed to initialize apiExtensionClient: %v", err)
return nil, err
}
crd := newCRDForTest(testcrd)
// Create the CRD and wait for the resource to be recognized and available.
dynamicClient, err := testserver.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient, f.ClientPool)
if err != nil {
Failf("failed to create CustomResourceDefinition: %v", err)
return nil, err
}
resourceClient := dynamicClient.Resource(&metav1.APIResource{
Name: crd.Spec.Names.Plural,
Namespaced: true,
}, f.Namespace.Name)
testcrd.ApiExtensionClient = apiExtensionClient
testcrd.Crd = crd
testcrd.DynamicClient = resourceClient
testcrd.CleanUp = func() error {
err := testserver.DeleteCustomResourceDefinition(crd, apiExtensionClient)
if err != nil {
Failf("failed to delete CustomResourceDefinition(%s): %v", name, err)
}
return err
}
return testcrd, nil
}
// newCRDForTest generates a CRD definition for the test
func newCRDForTest(testcrd *TestCrd) *apiextensionsv1beta1.CustomResourceDefinition {
return &apiextensionsv1beta1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{Name: testcrd.GetMetaName()},
Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
Group: testcrd.ApiGroup,
Version: testcrd.ApiVersion,
Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
Plural: testcrd.GetPluralName(),
Singular: testcrd.Name,
Kind: testcrd.Kind,
ListKind: testcrd.GetListName(),
},
Scope: apiextensionsv1beta1.NamespaceScoped,
},
}
}
// GetMetaName returns the meta name (<plural>.<group>) for the CRD.
func (c *TestCrd) GetMetaName() string {
return c.Name + "s." + c.ApiGroup
}
// GetPluralName returns the plural form of the CRD name
func (c *TestCrd) GetPluralName() string {
return c.Name + "s"
}
// GetListName returns the name for the CRD list resources
func (c *TestCrd) GetListName() string {
return c.Name + "List"
}
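A minimal sketch of how a test consumes these helpers (variable names assumed; not part of the diff):

// Hypothetical usage from an e2e test; f is the test's *framework.Framework.
testcrd, err := framework.CreateTestCRD(f)
if err != nil {
    framework.Failf("CRD setup failed: %v", err)
}
defer testcrd.CleanUp()
// testcrd.DynamicClient can now create and list instances of the custom resource.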


@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
@ -126,9 +127,9 @@ func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *extensio
return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, Logf, Poll, pollLongTimeout)
}
// WaitForDeploymentUpdatedReplicasLTE waits for the given deployment to be observed by the controller and to have at least minUpdatedReplicas updated replicas
func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
return testutils.WaitForDeploymentUpdatedReplicasLTE(c, ns, deploymentName, minUpdatedReplicas, desiredGeneration, Poll, pollLongTimeout)
// WaitForDeploymentUpdatedReplicasGTE waits for the given deployment to be observed by the controller and to have at least minUpdatedReplicas updated replicas
func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
return testutils.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, minUpdatedReplicas, desiredGeneration, Poll, pollLongTimeout)
}
// WaitForDeploymentRollbackCleared waits for given deployment either started rolling back or doesn't need to rollback.
@ -178,8 +179,8 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er
return err
}
func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, internalClientset, ns, name, size, wait, extensionsinternal.Kind("Deployment"))
func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, internalClientset, scalesGetter, ns, name, size, wait, extensionsinternal.Kind("Deployment"), extensionsinternal.Resource("deployments"))
}
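A sketch of the updated call site for the new scalesGetter parameter (hypothetical; assumes an e2e Framework instance f whose ScalesGetter field is populated in BeforeEach, as added below in framework.go):

// Hypothetical caller of the new signature.
err := framework.ScaleDeployment(f.ClientSet, f.InternalClientset, f.ScalesGetter,
    f.Namespace.Name, "example-deployment", 3, true /* wait for pods */)
if err != nil {
    framework.Failf("failed to scale deployment: %v", err)
}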
func RunDeployment(config testutils.DeploymentConfig) error {
@ -213,9 +214,9 @@ func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName,
return testutils.CheckDeploymentRevisionAndImage(c, ns, deploymentName, revision, image)
}
func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*extensions.Deployment, error) {
deploymentSpec := MakeDeployment(replicas, podLabels, namespace, pvclaims, false, command)
deployment, err := client.Extensions().Deployments(namespace).Create(deploymentSpec)
func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*extensions.Deployment, error) {
deploymentSpec := MakeDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command)
deployment, err := client.ExtensionsV1beta1().Deployments(namespace).Create(deploymentSpec)
if err != nil {
return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err)
}
@ -229,7 +230,7 @@ func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[
// MakeDeployment creates a deployment definition based on the namespace. The deployment references the PVCs by
// name. A slice of BASH commands can be supplied as args to be run by the pod.
func MakeDeployment(replicas int32, podLabels map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *extensions.Deployment {
func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *extensions.Deployment {
if len(command) == 0 {
command = "while true; do sleep 1; done"
}
@ -273,6 +274,9 @@ func MakeDeployment(replicas int32, podLabels map[string]string, namespace strin
}
deploymentSpec.Spec.Template.Spec.Containers[0].VolumeMounts = volumeMounts
deploymentSpec.Spec.Template.Spec.Volumes = volumes
if nodeSelector != nil {
deploymentSpec.Spec.Template.Spec.NodeSelector = nodeSelector
}
return deploymentSpec
}
@ -286,7 +290,7 @@ func GetPodsForDeployment(client clientset.Interface, deployment *extensions.Dep
return nil, fmt.Errorf("expected a new replica set for deployment %q, found none", deployment.Name)
}
podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
return client.Core().Pods(namespace).List(options)
return client.CoreV1().Pods(namespace).List(options)
}
rsList := []*extensions.ReplicaSet{replicaSet}
podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)


@ -24,7 +24,6 @@ import (
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
@ -100,7 +99,7 @@ func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service,
// GetInstanceTags gets tags from GCE instance with given name.
func GetInstanceTags(cloudConfig CloudConfig, instanceName string) *compute.Tags {
gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud)
res, err := gceCloud.GetComputeService().Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone,
res, err := gceCloud.ComputeServices().GA.Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone,
instanceName).Do()
if err != nil {
Failf("Failed to get instance tags for %v: %v", instanceName, err)
@ -113,7 +112,7 @@ func SetInstanceTags(cloudConfig CloudConfig, instanceName, zone string, tags []
gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud)
// Re-get the instance every time because we need the latest fingerprint for updating metadata
resTags := GetInstanceTags(cloudConfig, instanceName)
_, err := gceCloud.GetComputeService().Instances.SetTags(
_, err := gceCloud.ComputeServices().GA.Instances.SetTags(
cloudConfig.ProjectID, zone, instanceName,
&compute.Tags{Fingerprint: resTags.Fingerprint, Items: tags}).Do()
if err != nil {
@ -303,6 +302,83 @@ func PackProtocolsPortsFromFirewall(alloweds []*compute.FirewallAllowed) []strin
return protocolPorts
}
type portRange struct {
protocol string
min, max int
}
func toPortRange(s string) (pr portRange, err error) {
protoPorts := strings.Split(s, "/")
// Set protocol
pr.protocol = strings.ToUpper(protoPorts[0])
if len(protoPorts) != 2 {
return pr, fmt.Errorf("expected a single '/' in %q", s)
}
ports := strings.Split(protoPorts[1], "-")
switch len(ports) {
case 1:
v, err := strconv.Atoi(ports[0])
if err != nil {
return pr, err
}
pr.min, pr.max = v, v
case 2:
start, err := strconv.Atoi(ports[0])
if err != nil {
return pr, err
}
end, err := strconv.Atoi(ports[1])
if err != nil {
return pr, err
}
pr.min, pr.max = start, end
default:
return pr, fmt.Errorf("unexpected range value %q", protoPorts[1])
}
return pr, nil
}
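A few worked inputs for toPortRange (illustrative only; not part of the diff):

// toPortRange("tcp/50")      -> portRange{protocol: "TCP", min: 50, max: 50}
// toPortRange("udp/80-1000") -> portRange{protocol: "UDP", min: 80, max: 1000}
// toPortRange("tcp")         -> error: expected a single '/' in "tcp"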
// isPortsSubset asserts that the "requiredPorts" are covered by the "coverage" ports.
// requiredPorts - must be single-port entries, e.g. 'tcp/50', 'udp/80'.
// coverage - single-port or port-range values, e.g. 'tcp/50', 'udp/80-1000'.
// Returns nil if every required port is covered by some coverage rule, and an error otherwise.
func isPortsSubset(requiredPorts, coverage []string) error {
for _, reqPort := range requiredPorts {
rRange, err := toPortRange(reqPort)
if err != nil {
return err
}
if rRange.min != rRange.max {
return fmt.Errorf("requiring a range is not supported: %q", reqPort)
}
var covered bool
for _, c := range coverage {
cRange, err := toPortRange(c)
if err != nil {
return err
}
if rRange.protocol != cRange.protocol {
continue
}
if rRange.min >= cRange.min && rRange.min <= cRange.max {
covered = true
break
}
}
if !covered {
return fmt.Errorf("%q is not covered by %v", reqPort, coverage)
}
}
return nil
}
// SameStringArray verifies whether two string arrays have the same strings, returning an error if not.
// Order does not matter.
// When `include` is set to true, it verifies whether result includes all elements from expected.
@ -335,10 +411,19 @@ func VerifyFirewallRule(res, exp *compute.Firewall, network string, portsSubset
if !strings.HasSuffix(res.Network, "/"+network) {
return fmt.Errorf("incorrect network: %v, expected ends with: %v", res.Network, "/"+network)
}
if err := SameStringArray(PackProtocolsPortsFromFirewall(res.Allowed),
PackProtocolsPortsFromFirewall(exp.Allowed), portsSubset); err != nil {
return fmt.Errorf("incorrect allowed protocols ports: %v", err)
actualPorts := PackProtocolsPortsFromFirewall(res.Allowed)
expPorts := PackProtocolsPortsFromFirewall(exp.Allowed)
if portsSubset {
if err := isPortsSubset(expPorts, actualPorts); err != nil {
return fmt.Errorf("incorrect allowed protocol ports: %v", err)
}
} else {
if err := SameStringArray(actualPorts, expPorts, false); err != nil {
return fmt.Errorf("incorrect allowed protocols ports: %v", err)
}
}
if err := SameStringArray(res.SourceRanges, exp.SourceRanges, false); err != nil {
return fmt.Errorf("incorrect source ranges %v, expected %v: %v", res.SourceRanges, exp.SourceRanges, err)
}
@ -371,19 +456,3 @@ func WaitForFirewallRule(gceCloud *gcecloud.GCECloud, fwName string, exist bool,
}
return fw, nil
}
func GetClusterID(c clientset.Interface) (string, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{})
if err != nil || cm == nil {
return "", fmt.Errorf("error getting cluster ID: %v", err)
}
clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster]
providerID, providerIDExists := cm.Data[gcecloud.UIDProvider]
if !clusterIDExists {
return "", fmt.Errorf("cluster ID not set")
}
if providerIDExists {
return providerID, nil
}
return clusterID, nil
}


@ -0,0 +1,54 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import "testing"
func TestIsPortsSubset(t *testing.T) {
tc := map[string]struct {
required []string
coverage []string
expectErr bool
}{
"Single port coverage": {
required: []string{"tcp/50"},
coverage: []string{"tcp/50", "tcp/60", "tcp/70"},
},
"Port range coverage": {
required: []string{"tcp/50"},
coverage: []string{"tcp/20-30", "tcp/45-60"},
},
"Multiple Port range coverage": {
required: []string{"tcp/50", "tcp/29", "tcp/46"},
coverage: []string{"tcp/20-30", "tcp/45-60"},
},
"Not covered": {
required: []string{"tcp/50"},
coverage: []string{"udp/50", "tcp/49", "tcp/51-60"},
expectErr: true,
},
}
for name, c := range tc {
t.Run(name, func(t *testing.T) {
gotErr := isPortsSubset(c.required, c.coverage)
if c.expectErr != (gotErr != nil) {
t.Errorf("isPortsSubset(%v, %v) = %v, wanted err? %v", c.required, c.coverage, gotErr, c.expectErr)
}
})
}
}


@ -28,14 +28,19 @@ import (
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
cacheddiscovery "k8s.io/client-go/discovery/cached"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
scaleclient "k8s.io/client-go/scale"
"k8s.io/client-go/tools/clientcmd"
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/api/legacyscheme"
@ -67,13 +72,15 @@ type Framework struct {
AggregatorClient *aggregatorclient.Clientset
ClientPool dynamic.ClientPool
ScalesGetter scaleclient.ScalesGetter
SkipNamespaceCreation bool // Whether to skip creating a namespace
Namespace *v1.Namespace // Every test has at least one namespace unless creation is skipped
namespacesToDelete []*v1.Namespace // Some tests have more than one.
NamespaceDeletionTimeout time.Duration
SkipPrivilegedPSPBinding bool // Whether to skip creating a binding to the privileged PSP in the test namespace
gatherer *containerResourceGatherer
gatherer *ContainerResourceGatherer
// Constraints that passed to a check which is executed after data is gathered to
// see if 99% of results are within acceptable bounds. It has to be injected in the test,
// as expectations vary greatly. Constraints are grouped by the container names.
@ -161,6 +168,25 @@ func (f *Framework) BeforeEach() {
f.AggregatorClient, err = aggregatorclient.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
f.ClientPool = dynamic.NewClientPool(config, legacyscheme.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
// create scales getter, set GroupVersion and NegotiatedSerializer to default values
// as they are required when creating a REST client.
if config.GroupVersion == nil {
config.GroupVersion = &schema.GroupVersion{}
}
if config.NegotiatedSerializer == nil {
config.NegotiatedSerializer = legacyscheme.Codecs
}
restClient, err := rest.RESTClientFor(config)
Expect(err).NotTo(HaveOccurred())
discoClient, err := discovery.NewDiscoveryClientForConfig(config)
Expect(err).NotTo(HaveOccurred())
cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient)
restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscoClient, meta.InterfacesForUnstructured)
restMapper.Reset()
resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)
if ProviderIs("kubemark") && TestContext.KubemarkExternalKubeConfig != "" && TestContext.CloudConfig.KubemarkController == nil {
externalConfig, err := clientcmd.BuildConfigFromFlags("", TestContext.KubemarkExternalKubeConfig)
externalConfig.QPS = f.Options.ClientQPS
@ -378,7 +404,7 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
f.namespacesToDelete = append(f.namespacesToDelete, ns)
}
if !f.SkipPrivilegedPSPBinding {
if err == nil && !f.SkipPrivilegedPSPBinding {
CreatePrivilegedPSPBinding(f, ns.Name)
}


@ -22,84 +22,12 @@ import (
"io/ioutil"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
)
// TODO: These should really just use the GCE API client library or at least use
// better formatted output from the --format flag.
func CreateGCEStaticIP(name string) (string, error) {
// gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
// abshah@abhidesk:~/go/src/code.google.com/p/google-api-go-client/compute/v1$ gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
// Created [https://www.googleapis.com/compute/v1/projects/abshah-kubernetes-001/regions/us-central1/addresses/test-static-ip].
// NAME REGION ADDRESS STATUS
// test-static-ip us-central1 104.197.143.7 RESERVED
var outputBytes []byte
var err error
region, err := gce.GetGCERegion(TestContext.CloudConfig.Zone)
if err != nil {
return "", fmt.Errorf("failed to convert zone to region: %v", err)
}
glog.Infof("Creating static IP with name %q in project %q in region %q", name, TestContext.CloudConfig.ProjectID, region)
for attempts := 0; attempts < 4; attempts++ {
outputBytes, err = exec.Command("gcloud", "compute", "addresses", "create",
name, "--project", TestContext.CloudConfig.ProjectID,
"--region", region, "-q", "--format=yaml").CombinedOutput()
if err == nil {
break
}
glog.Errorf("output from failed attempt to create static IP: %s", outputBytes)
time.Sleep(time.Duration(5*attempts) * time.Second)
}
if err != nil {
// Ditch the error, since the stderr in the output is what actually contains
// any useful info.
return "", fmt.Errorf("failed to create static IP: %s", outputBytes)
}
output := string(outputBytes)
if strings.Contains(output, "RESERVED") {
r, _ := regexp.Compile("[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+")
staticIP := r.FindString(output)
if staticIP == "" {
return "", fmt.Errorf("static IP not found in gcloud command output: %v", output)
} else {
return staticIP, nil
}
} else {
return "", fmt.Errorf("static IP %q could not be reserved: %v", name, output)
}
}
func DeleteGCEStaticIP(name string) error {
// gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
// abshah@abhidesk:~/go/src/code.google.com/p/google-api-go-client/compute/v1$ gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
// Created [https://www.googleapis.com/compute/v1/projects/abshah-kubernetes-001/regions/us-central1/addresses/test-static-ip].
// NAME REGION ADDRESS STATUS
// test-static-ip us-central1 104.197.143.7 RESERVED
region, err := gce.GetGCERegion(TestContext.CloudConfig.Zone)
if err != nil {
return fmt.Errorf("failed to convert zone to region: %v", err)
}
glog.Infof("Deleting static IP with name %q in project %q in region %q", name, TestContext.CloudConfig.ProjectID, region)
outputBytes, err := exec.Command("gcloud", "compute", "addresses", "delete",
name, "--project", TestContext.CloudConfig.ProjectID,
"--region", region, "-q").CombinedOutput()
if err != nil {
// Ditch the error, since the stderr in the output is what actually contains
// any useful info.
return fmt.Errorf("failed to delete static IP %q: %v", name, string(outputBytes))
}
return nil
}
// Returns master & node image string, or error
func lookupClusterImageSources() (string, string, error) {
// Given args for a gcloud compute command, run it with other args, and return the values,
@ -196,3 +124,33 @@ func LogClusterImageSources() {
Logf("cluster images sources, could not write to %q: %v", filePath, err)
}
}
func CreateManagedInstanceGroup(size int64, zone, template string) error {
// TODO(verult): make this hit the compute API directly instead of
// shelling out to gcloud.
_, _, err := retryCmd("gcloud", "compute", "instance-groups", "managed",
"create",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
fmt.Sprintf("--zone=%s", zone),
TestContext.CloudConfig.NodeInstanceGroup,
fmt.Sprintf("--size=%d", size),
fmt.Sprintf("--template=%s", template))
if err != nil {
return fmt.Errorf("gcloud compute instance-groups managed create call failed with err: %v", err)
}
return nil
}
func DeleteManagedInstanceGroup(zone string) error {
// TODO(verult): make this hit the compute API directly instead of
// shelling out to gcloud.
_, _, err := retryCmd("gcloud", "compute", "instance-groups", "managed",
"delete",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
fmt.Sprintf("--zone=%s", zone),
TestContext.CloudConfig.NodeInstanceGroup)
if err != nil {
return fmt.Errorf("gcloud compute instance-groups managed delete call failed with err: %v", err)
}
return nil
}
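A minimal sketch of pairing the two helpers in a test (zone and template names are placeholders):

// Hypothetical: create a 3-instance MIG for the test, tear it down afterwards.
if err := framework.CreateManagedInstanceGroup(3, "us-central1-b", "example-template"); err != nil {
    framework.Failf("MIG creation failed: %v", err)
}
defer framework.DeleteManagedInstanceGroup("us-central1-b")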

File diff suppressed because it is too large.


@ -158,7 +158,7 @@ func (g *MetricsGrabber) GrabFromControllerManager() (ControllerManagerMetrics,
if !g.registeredMaster {
return ControllerManagerMetrics{}, fmt.Errorf("Master's Kubelet is not registered. Skipping ControllerManager's metrics gathering.")
}
output, err := g.getMetricsFromPod(g.client, fmt.Sprintf("%v-%v", "kube-controller-manager", g.masterName), metav1.NamespaceSystem, ports.ControllerManagerPort)
output, err := g.getMetricsFromPod(g.client, fmt.Sprintf("%v-%v", "kube-controller-manager", g.masterName), metav1.NamespaceSystem, ports.InsecureKubeControllerManagerPort)
if err != nil {
return ControllerManagerMetrics{}, err
}


@ -40,6 +40,15 @@ func EtcdUpgrade(target_storage, target_version string) error {
}
}
func IngressUpgrade(isUpgrade bool) error {
switch TestContext.Provider {
case "gce":
return ingressUpgradeGCE(isUpgrade)
default:
return fmt.Errorf("IngressUpgrade() is not implemented for provider %s", TestContext.Provider)
}
}
func MasterUpgrade(v string) error {
switch TestContext.Provider {
case "gce":
@ -58,12 +67,33 @@ func etcdUpgradeGCE(target_storage, target_version string) error {
os.Environ(),
"TEST_ETCD_VERSION="+target_version,
"STORAGE_BACKEND="+target_storage,
"TEST_ETCD_IMAGE=3.1.10")
"TEST_ETCD_IMAGE=3.2.14")
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-l", "-M")
return err
}
func ingressUpgradeGCE(isUpgrade bool) error {
var command string
if isUpgrade {
// User specified image to upgrade to.
targetImage := TestContext.IngressUpgradeImage
if targetImage != "" {
command = fmt.Sprintf("sudo sed -i -re 's|(image:)(.*)|\\1 %s|' /etc/kubernetes/manifests/glbc.manifest", targetImage)
} else {
// Upgrade to latest HEAD image.
command = "sudo sed -i -re 's/(image:)(.*)/\\1 gcr.io\\/k8s-ingress-image-push\\/ingress-gce-e2e-glbc-amd64:latest/' /etc/kubernetes/manifests/glbc.manifest"
}
} else {
// Downgrade to latest release image.
command = "sudo sed -i -re 's/(image:)(.*)/\\1 k8s.gcr.io\\/google_containers\\/glbc:0.9.7/' /etc/kubernetes/manifests/glbc.manifest"
}
// Kubelet should restart glbc automatically.
sshResult, err := NodeExec(GetMasterHost(), command)
LogSSHResult(sshResult)
return err
}
// TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default.
func MasterUpgradeGCEWithKubeProxyDaemonSet(v string, enableKubeProxyDaemonSet bool) error {
return masterUpgradeGCE(v, enableKubeProxyDaemonSet)
@ -77,7 +107,7 @@ func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error {
env = append(env,
"TEST_ETCD_VERSION="+TestContext.EtcdUpgradeVersion,
"STORAGE_BACKEND="+TestContext.EtcdUpgradeStorage,
"TEST_ETCD_IMAGE=3.1.10")
"TEST_ETCD_IMAGE=3.2.14")
} else {
// In e2e tests, we skip the confirmation prompt about
// implicit etcd upgrades to simulate the user entering "y".


@ -0,0 +1,187 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"strings"
"sync"
"time"
)
const (
// Default number of seconds for which the CPU profile is gathered.
DefaultCPUProfileSeconds = 30
)
func getProfilesDirectoryPath() string {
return path.Join(TestContext.ReportDir, "profiles")
}
func createProfilesDirectoryIfNeeded() error {
profileDirPath := getProfilesDirectoryPath()
if _, err := os.Stat(profileDirPath); os.IsNotExist(err) {
if mkdirErr := os.Mkdir(profileDirPath, 0777); mkdirErr != nil {
return fmt.Errorf("Failed to create profiles dir: %v", mkdirErr)
}
} else if err != nil {
return fmt.Errorf("Failed to check existence of profiles dir: %v", err)
}
return nil
}
func checkProfileGatheringPrerequisites() error {
if !TestContext.AllowGatheringProfiles {
return fmt.Errorf("Can't gather profiles as --allow-gathering-profiles is false")
}
if TestContext.ReportDir == "" {
return fmt.Errorf("Can't gather profiles as --report-dir is empty")
}
if err := createProfilesDirectoryIfNeeded(); err != nil {
return fmt.Errorf("Failed to ensure profiles dir: %v", err)
}
return nil
}
func gatherProfileOfKind(profileBaseName, kind string) error {
// Get the profile data over SSH.
getCommand := fmt.Sprintf("curl -s localhost:8080/debug/pprof/%s", kind)
sshResult, err := SSH(getCommand, GetMasterHost()+":22", TestContext.Provider)
if err != nil {
return fmt.Errorf("Failed to execute curl command on master through SSH: %v", err)
}
// Write the data to a temp file.
var tmpfile *os.File
tmpfile, err = ioutil.TempFile("", "apiserver-profile")
if err != nil {
return fmt.Errorf("Failed to create temp file for profile data: %v", err)
}
defer os.Remove(tmpfile.Name())
if _, err := tmpfile.Write([]byte(sshResult.Stdout)); err != nil {
return fmt.Errorf("Failed to write temp file with profile data: %v", err)
}
if err := tmpfile.Close(); err != nil {
return fmt.Errorf("Failed to close temp file: %v", err)
}
// Create a graph from the data and write it to a pdf file.
var cmd *exec.Cmd
var profilePrefix string
switch {
// TODO: Support other profile kinds if needed (e.g. inuse_space, alloc_objects, mutex, etc.)
case kind == "heap":
cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", "--alloc_space", tmpfile.Name())
profilePrefix = "ApiserverMemoryProfile_"
case strings.HasPrefix(kind, "profile"):
cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", tmpfile.Name())
profilePrefix = "ApiserverCPUProfile_"
default:
return fmt.Errorf("Unknown profile kind provided: %s", kind)
}
outfilePath := path.Join(getProfilesDirectoryPath(), profilePrefix+profileBaseName+".pdf")
var outfile *os.File
outfile, err = os.Create(outfilePath)
if err != nil {
return fmt.Errorf("Failed to create file for the profile graph: %v", err)
}
defer outfile.Close()
cmd.Stdout = outfile
stderr := bytes.NewBuffer(nil)
cmd.Stderr = stderr
if err := cmd.Run(); nil != err {
return fmt.Errorf("Failed to run 'go tool pprof': %v, stderr: %#v", err, stderr.String())
}
return nil
}
// The below exposed functions can take a while to execute as they SSH to the master,
// collect and copy the profile over and then graph it. To allow waiting for these to
// finish before the parent goroutine itself finishes, we accept a sync.WaitGroup
// argument in these functions. Typically you would use the following pattern:
//
// func TestFooBar() {
// var wg sync.WaitGroup
// wg.Add(3)
// go framework.GatherApiserverCPUProfile(&wg, "doing_foo")
// go framework.GatherApiserverMemoryProfile(&wg, "doing_foo")
// <<<< some code doing foo >>>>>>
// go framework.GatherApiserverCPUProfile(&wg, "doing_bar")
// <<<< some code doing bar >>>>>>
// wg.Wait()
// }
//
// If you do not wish to exercise the waiting logic, pass nil for the
// WaitGroup argument instead; you are then responsible for ensuring
// that the function finishes.
func GatherApiserverCPUProfile(wg *sync.WaitGroup, profileBaseName string) {
GatherApiserverCPUProfileForNSeconds(wg, profileBaseName, DefaultCPUProfileSeconds)
}
func GatherApiserverCPUProfileForNSeconds(wg *sync.WaitGroup, profileBaseName string, n int) {
if wg != nil {
defer wg.Done()
}
if err := checkProfileGatheringPrerequisites(); err != nil {
Logf("Profile gathering pre-requisite failed: %v", err)
return
}
if profileBaseName == "" {
profileBaseName = time.Now().Format(time.RFC3339)
}
if err := gatherProfileOfKind(profileBaseName, fmt.Sprintf("profile?seconds=%v", n)); err != nil {
Logf("Failed to gather apiserver CPU profile: %v", err)
}
}
func GatherApiserverMemoryProfile(wg *sync.WaitGroup, profileBaseName string) {
if wg != nil {
defer wg.Done()
}
if err := checkProfileGatheringPrerequisites(); err != nil {
Logf("Profile gathering pre-requisite failed: %v", err)
return
}
if profileBaseName == "" {
profileBaseName = time.Now().Format(time.RFC3339)
}
if err := gatherProfileOfKind(profileBaseName, "heap"); err != nil {
Logf("Failed to gather apiserver memory profile: %v", err)
}
}
// StartApiserverCPUProfileGatherer is a polling-based gatherer of the apiserver's
// CPU profile. It takes the delay between consecutive gatherings as an argument and
// starts the gathering goroutine. To stop the gatherer, close the returned channel.
func StartApiserverCPUProfileGatherer(delay time.Duration) chan struct{} {
stopCh := make(chan struct{})
go func() {
for {
select {
case <-time.After(delay):
GatherApiserverCPUProfile(nil, "")
case <-stopCh:
return
}
}
}()
return stopCh
}
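A minimal usage sketch (names assumed; not part of the diff):

// Hypothetical: gather an apiserver CPU profile every 5 minutes until the
// suite finishes, then close the channel to stop the gathering goroutine.
stopCh := framework.StartApiserverCPUProfileGatherer(5 * time.Minute)
defer close(stopCh)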


@ -114,30 +114,34 @@ func CreatePrivilegedPSPBinding(f *Framework, namespace string) {
psp, err = f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().Create(psp)
ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
// Create the Role to bind it to the namespace.
_, err = f.ClientSet.RbacV1beta1().ClusterRoles().Create(&rbacv1beta1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged},
Rules: []rbacv1beta1.PolicyRule{{
APIGroups: []string{"extensions"},
Resources: []string{"podsecuritypolicies"},
ResourceNames: []string{podSecurityPolicyPrivileged},
Verbs: []string{"use"},
}},
})
ExpectNoError(err, "Failed to create PSP role")
if IsRBACEnabled(f) {
// Create the Role to bind it to the namespace.
_, err = f.ClientSet.RbacV1beta1().ClusterRoles().Create(&rbacv1beta1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged},
Rules: []rbacv1beta1.PolicyRule{{
APIGroups: []string{"extensions"},
Resources: []string{"podsecuritypolicies"},
ResourceNames: []string{podSecurityPolicyPrivileged},
Verbs: []string{"use"},
}},
})
ExpectNoError(err, "Failed to create PSP role")
}
})
By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s",
podSecurityPolicyPrivileged, namespace))
BindClusterRoleInNamespace(f.ClientSet.RbacV1beta1(),
podSecurityPolicyPrivileged,
namespace,
rbacv1beta1.Subject{
Kind: rbacv1beta1.ServiceAccountKind,
Namespace: namespace,
Name: "default",
})
ExpectNoError(WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(),
serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged,
schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true))
if IsRBACEnabled(f) {
By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s",
podSecurityPolicyPrivileged, namespace))
BindClusterRoleInNamespace(f.ClientSet.RbacV1beta1(),
podSecurityPolicyPrivileged,
namespace,
rbacv1beta1.Subject{
Kind: rbacv1beta1.ServiceAccountKind,
Namespace: namespace,
Name: "default",
})
ExpectNoError(WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(),
serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged,
schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true))
}
}


@ -36,10 +36,10 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/pkg/volume/util"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
@ -80,7 +80,8 @@ type PersistentVolumeConfig struct {
NamePrefix string
Labels labels.Set
StorageClassName string
NodeAffinity *v1.NodeAffinity
NodeAffinity *v1.VolumeNodeAffinity
VolumeMode *v1.PersistentVolumeMode
}
// PersistentVolumeClaimConfig is consumed by MakePersistentVolumeClaim() to generate a PVC object.
@ -92,6 +93,7 @@ type PersistentVolumeClaimConfig struct {
Annotations map[string]string
Selector *metav1.LabelSelector
StorageClassName *string
VolumeMode *v1.PersistentVolumeMode
}
// Clean up a pv and pvc in a single pv/pvc test case.
@ -181,7 +183,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
// Wait for the PV's phase to become `expectPVPhase`.
Logf("Waiting for reclaim process to complete.")
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, 1*time.Second, 300*time.Second)
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, Poll, PVReclaimingTimeout)
if err != nil {
return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
}
@ -262,6 +264,11 @@ func createPV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVol
return pv, nil
}
// CreatePV creates the PV resource. Returns an error on failure.
func CreatePV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
return createPV(c, pv)
}
// CreatePVC creates the PVC resource. Returns an error on failure.
func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
@ -391,14 +398,14 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
// Wait for newly created PVC to bind to the PV
Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, Poll, ClaimBindingTimeout)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
}
// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
// since the PVC is already bound.
err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, Poll, PVBindingTimeout)
if err != nil {
return fmt.Errorf("PV %q did not become Bound: %v", pv.Name, err)
}
@ -444,7 +451,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
}
for pvName := range pvols {
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, 3*time.Second, 180*time.Second)
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, Poll, PVBindingTimeout)
if err != nil && len(pvols) > len(claims) {
Logf("WARN: pv %v is not bound after max wait", pvName)
Logf(" This may be ok since there are more pvs than pvcs")
@ -467,7 +474,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey)
}
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, 3*time.Second, 180*time.Second)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, Poll, ClaimBindingTimeout)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", cr.Name, err)
}
@ -576,12 +583,12 @@ func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume
Namespace: pvConfig.Prebind.Namespace,
}
}
pv := &v1.PersistentVolume{
return &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
GenerateName: pvConfig.NamePrefix,
Labels: pvConfig.Labels,
Annotations: map[string]string{
volumehelper.VolumeGidAnnotationKey: "777",
util.VolumeGidAnnotationKey: "777",
},
},
Spec: v1.PersistentVolumeSpec{
@ -597,14 +604,10 @@ func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume
},
ClaimRef: claimRef,
StorageClassName: pvConfig.StorageClassName,
NodeAffinity: pvConfig.NodeAffinity,
VolumeMode: pvConfig.VolumeMode,
},
}
err := helper.StorageNodeAffinityToAlphaAnnotation(pv.Annotations, pvConfig.NodeAffinity)
if err != nil {
Logf("Setting storage node affinity failed: %v", err)
return nil
}
return pv
}
// Returns a PVC definition based on the namespace.
@ -634,6 +637,7 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
},
},
StorageClassName: cfg.StorageClassName,
VolumeMode: cfg.VolumeMode,
},
}
}
@ -856,7 +860,7 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bo
Containers: []v1.Container{
{
Name: "write-pod",
Image: "gcr.io/google_containers/busybox:1.24",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
SecurityContext: &v1.SecurityContext{
@ -867,14 +871,21 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bo
RestartPolicy: v1.RestartPolicyOnFailure,
},
}
var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
var volumeMounts = make([]v1.VolumeMount, 0)
var volumeDevices = make([]v1.VolumeDevice, 0)
var volumes = make([]v1.Volume, len(pvclaims))
for index, pvclaim := range pvclaims {
volumename := fmt.Sprintf("volume%v", index+1)
volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
if pvclaim.Spec.VolumeMode != nil && *pvclaim.Spec.VolumeMode == v1.PersistentVolumeBlock {
volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: "/mnt/" + volumename})
} else {
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
}
volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
}
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
podSpec.Spec.Containers[0].VolumeDevices = volumeDevices
podSpec.Spec.Volumes = volumes
podSpec.Spec.SecurityContext.SELinuxOptions = seLinuxLabel
return podSpec
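Because MakeSecPod now branches on the claim's VolumeMode, a block-mode claim is attached as a VolumeDevice rather than a VolumeMount. A sketch of requesting that (hypothetical values):

// Hypothetical: a raw-block claim that MakeSecPod will wire up as a VolumeDevice.
blockMode := v1.PersistentVolumeBlock
pvc := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
    VolumeMode: &blockMode,
}, ns)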
@ -925,6 +936,26 @@ func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeC
return CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
}
// CreateUnschedulablePod creates a pod with the given claims and node selector, and waits for it to become Unschedulable.
func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(pod)
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
}
// Wait for the pod to become Unschedulable.
err = WaitForPodNameUnschedulableInNamespace(client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Unschedulable: %v", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
}
return pod, nil
}
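A sketch of the intended use, with a deliberately unsatisfiable selector (names assumed):

// Hypothetical: a node selector that no node matches keeps the pod Pending.
pod, err := framework.CreateUnschedulablePod(c, ns,
    map[string]string{"kubernetes.io/hostname": "no-such-node"},
    []*v1.PersistentVolumeClaim{pvc}, false, "")
if err != nil {
    framework.Failf("expected an unschedulable pod: %v", err)
}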
// Wait until all PVCs' phase is set to Bound.
func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim, timeout time.Duration) ([]*v1.PersistentVolume, error) {
persistentvolumes := make([]*v1.PersistentVolume, len(pvclaims))


@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
"k8s.io/kubernetes/pkg/api/testapi"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@ -84,7 +85,9 @@ func RcByNameContainer(name string, replicas int32, image string, labels map[str
// ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits until
// none are running, otherwise it does what a synchronous scale operation would do.
func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, ns string, l map[string]string, replicas uint) error {
//TODO(p0lyn0mial): remove internalClientset.
//TODO(p0lyn0mial): update the callers.
func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns string, l map[string]string, replicas uint) error {
listOpts := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()}
rcs, err := clientset.CoreV1().ReplicationControllers(ns).List(listOpts)
if err != nil {
@ -96,7 +99,7 @@ func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalcl
Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
for _, labelRC := range rcs.Items {
name := labelRC.Name
if err := ScaleRC(clientset, internalClientset, ns, name, replicas, false); err != nil {
if err := ScaleRC(clientset, internalClientset, scalesGetter, ns, name, replicas, false); err != nil {
return err
}
rc, err := clientset.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
@ -156,8 +159,8 @@ func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalcl
return DeleteResourceAndPods(clientset, internalClientset, api.Kind("ReplicationController"), ns, name)
}
func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, internalClientset, ns, name, size, wait, api.Kind("ReplicationController"))
func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, internalClientset, scalesGetter, ns, name, size, wait, api.Kind("ReplicationController"), api.Resource("replicationcontrollers"))
}
func RunRC(config testutils.RCConfig) error {


@ -191,7 +191,7 @@ func (w *resourceGatherWorker) gather(initialSleep time.Duration) {
}
}
type containerResourceGatherer struct {
type ContainerResourceGatherer struct {
client clientset.Interface
stopCh chan struct{}
workers []resourceGatherWorker
@ -208,8 +208,8 @@ type ResourceGathererOptions struct {
PrintVerboseLogs bool
}
func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*containerResourceGatherer, error) {
g := containerResourceGatherer{
func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) {
g := ContainerResourceGatherer{
client: c,
stopCh: make(chan struct{}),
containerIDs: make([]string, 0),
@ -277,7 +277,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
// StartGatheringData starts a stat-gathering worker for each node to track,
// and blocks until StopAndSummarize is called.
func (g *containerResourceGatherer) StartGatheringData() {
func (g *ContainerResourceGatherer) StartGatheringData() {
if len(g.workers) == 0 {
return
}
@ -294,7 +294,7 @@ func (g *containerResourceGatherer) StartGatheringData() {
// generates resource summary for the passed-in percentiles, and returns the summary.
// It returns an error if the resource usage at any percentile is beyond the
// specified resource constraints.
func (g *containerResourceGatherer) StopAndSummarize(percentiles []int, constraints map[string]ResourceConstraint) (*ResourceUsageSummary, error) {
func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constraints map[string]ResourceConstraint) (*ResourceUsageSummary, error) {
close(g.stopCh)
Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
finished := make(chan struct{})


@ -26,6 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
extensionsclient "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
testutils "k8s.io/kubernetes/test/utils"
)
@ -70,6 +71,54 @@ func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
return err
}
// WaitForReplicaSetDesiredReplicas waits until the replicaset has desired number of replicas.
func WaitForReplicaSetDesiredReplicas(rsClient extensionsclient.ReplicaSetsGetter, replicaSet *extensions.ReplicaSet) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.Replicas == *(replicaSet.Spec.Replicas) && rs.Status.Replicas == *(rs.Spec.Replicas), nil
})
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("replicaset %q never had desired number of replicas", replicaSet.Name)
}
return err
}
// WaitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a RS to equal targetReplicaNum
func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *extensions.ReplicaSet, targetReplicaNum int32) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
rs, err := c.ExtensionsV1beta1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return rs.Status.ObservedGeneration >= desiredGeneration && *rs.Spec.Replicas == targetReplicaNum, nil
})
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("replicaset %q never had desired number of .spec.replicas", replicaSet.Name)
}
return err
}
// WaitForReplicaSetTargetAvailableReplicas waits for .status.availableReplicas of a RS to equal targetReplicaNum
func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet *extensions.ReplicaSet, targetReplicaNum int32) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
rs, err := c.ExtensionsV1beta1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.AvailableReplicas == targetReplicaNum, nil
})
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("replicaset %q never had desired number of .status.availableReplicas", replicaSet.Name)
}
return err
}
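All three waiters share the same PollImmediate shape; a caller sketch (names assumed):

// Hypothetical: after scaling ReplicaSet rs down to 1 replica, wait for the
// controller to observe the change and for availability to catch up.
if err := framework.WaitForReplicaSetTargetSpecReplicas(c, rs, 1); err != nil {
    framework.Failf("spec.replicas never converged: %v", err)
}
if err := framework.WaitForReplicaSetTargetAvailableReplicas(c, rs, 1); err != nil {
    framework.Failf("availableReplicas never converged: %v", err)
}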
func RunReplicaSet(config testutils.ReplicaSetConfig) error {
By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = DumpNodeDebugInfo


@ -802,7 +802,7 @@ func newEchoServerPodSpec(podName string) *v1.Pod {
Containers: []v1.Container{
{
Name: "echoserver",
Image: "gcr.io/google_containers/echoserver:1.6",
Image: imageutils.GetE2EImage(imageutils.EchoServer),
Ports: []v1.ContainerPort{{ContainerPort: int32(port)}},
},
},
@ -1105,22 +1105,6 @@ func GetContainerPortsByPodUID(endpoints *v1.Endpoints) PortsByPodUID {
for _, port := range ss.Ports {
for _, addr := range ss.Addresses {
containerPort := port.Port
hostPort := port.Port
// use endpoint annotations to recover the container port in a Mesos setup
// compare contrib/mesos/pkg/service/endpoints_controller.syncService
key := fmt.Sprintf("k8s.mesosphere.io/containerPort_%s_%s_%d", port.Protocol, addr.IP, hostPort)
mesosContainerPortString := endpoints.Annotations[key]
if mesosContainerPortString != "" {
mesosContainerPort, err := strconv.Atoi(mesosContainerPortString)
if err != nil {
continue
}
containerPort = int32(mesosContainerPort)
Logf("Mapped mesos host port %d to container port %d via annotation %s=%s", hostPort, containerPort, key, mesosContainerPortString)
}
// Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort)
if _, ok := m[addr.TargetRef.UID]; !ok {
m[addr.TargetRef.UID] = make([]int, 0)
}


@ -28,7 +28,7 @@ import (
. "github.com/onsi/gomega"
apps "k8s.io/api/apps/v1beta1"
apps "k8s.io/api/apps/v1"
appsV1beta2 "k8s.io/api/apps/v1beta2"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
@ -83,7 +83,7 @@ func NewStatefulSetTester(c clientset.Interface) *StatefulSetTester {
// GetStatefulSet gets the StatefulSet named name in namespace.
func (s *StatefulSetTester) GetStatefulSet(namespace, name string) *apps.StatefulSet {
ss, err := s.c.AppsV1beta1().StatefulSets(namespace).Get(name, metav1.GetOptions{})
ss, err := s.c.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
Failf("Failed to get StatefulSet %s/%s: %v", namespace, name, err)
}
@ -108,7 +108,7 @@ func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *apps.Sta
Expect(err).NotTo(HaveOccurred())
Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector))
_, err = s.c.AppsV1beta1().StatefulSets(ns).Create(ss)
_, err = s.c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
s.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
return ss
@ -245,12 +245,12 @@ func (s *StatefulSetTester) Restart(ss *apps.StatefulSet) {
func (s *StatefulSetTester) update(ns, name string, update func(ss *apps.StatefulSet)) *apps.StatefulSet {
for i := 0; i < 3; i++ {
ss, err := s.c.AppsV1beta1().StatefulSets(ns).Get(name, metav1.GetOptions{})
ss, err := s.c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
Failf("failed to get statefulset %q: %v", name, err)
}
update(ss)
ss, err = s.c.AppsV1beta1().StatefulSets(ns).Update(ss)
ss, err = s.c.AppsV1().StatefulSets(ns).Update(ss)
if err == nil {
return ss
}
@ -328,7 +328,7 @@ func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, s
func (s *StatefulSetTester) WaitForState(ss *apps.StatefulSet, until func(*apps.StatefulSet, *v1.PodList) (bool, error)) {
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
func() (bool, error) {
ssGet, err := s.c.AppsV1beta1().StatefulSets(ss.Namespace).Get(ss.Name, metav1.GetOptions{})
ssGet, err := s.c.AppsV1().StatefulSets(ss.Namespace).Get(ss.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -344,7 +344,7 @@ func (s *StatefulSetTester) WaitForState(ss *apps.StatefulSet, until func(*apps.
// The returned StatefulSet contains such a StatefulSetStatus
func (s *StatefulSetTester) WaitForStatus(set *apps.StatefulSet) *apps.StatefulSet {
s.WaitForState(set, func(set2 *apps.StatefulSet, pods *v1.PodList) (bool, error) {
if set2.Status.ObservedGeneration != nil && *set2.Status.ObservedGeneration >= set.Generation {
if set2.Status.ObservedGeneration >= set.Generation {
set = set2
return true, nil
}
@ -613,11 +613,11 @@ func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *apps.StatefulSet, exp
ns, name := ss.Namespace, ss.Name
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
func() (bool, error) {
ssGet, err := s.c.AppsV1beta1().StatefulSets(ns).Get(name, metav1.GetOptions{})
ssGet, err := s.c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
if *ssGet.Status.ObservedGeneration < ss.Generation {
if ssGet.Status.ObservedGeneration < ss.Generation {
return false, nil
}
if ssGet.Status.ReadyReplicas != expectedReplicas {
@ -638,11 +638,11 @@ func (s *StatefulSetTester) WaitForStatusReplicas(ss *apps.StatefulSet, expected
ns, name := ss.Namespace, ss.Name
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
func() (bool, error) {
ssGet, err := s.c.AppsV1beta1().StatefulSets(ns).Get(name, metav1.GetOptions{})
ssGet, err := s.c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
if *ssGet.Status.ObservedGeneration < ss.Generation {
if ssGet.Status.ObservedGeneration < ss.Generation {
return false, nil
}
if ssGet.Status.Replicas != expectedReplicas {
@ -676,7 +676,7 @@ func (s *StatefulSetTester) SortStatefulPods(pods *v1.PodList) {
// DeleteAllStatefulSets deletes all StatefulSet API Objects in Namespace ns.
func DeleteAllStatefulSets(c clientset.Interface, ns string) {
sst := &StatefulSetTester{c: c}
ssList, err := c.AppsV1beta1().StatefulSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
ssList, err := c.AppsV1().StatefulSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
ExpectNoError(err)
// Scale down each statefulset, then delete it completely.
@ -692,7 +692,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
Logf("Deleting statefulset %v", ss.Name)
// Use OrphanDependents=false so it's deleted synchronously.
// We already made sure the Pods are gone inside Scale().
if err := c.AppsV1beta1().StatefulSets(ss.Namespace).Delete(ss.Name, &metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil {
if err := c.AppsV1().StatefulSets(ss.Namespace).Delete(ss.Name, &metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil {
errList = append(errList, fmt.Sprintf("%v", err))
}
}
@ -790,7 +790,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
return &apps.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
APIVersion: "apps/v1beta1",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
@ -872,7 +872,7 @@ func (sp statefulPodsByOrdinal) Less(i, j int) bool {
type updateStatefulSetFunc func(*apps.StatefulSet)
func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *apps.StatefulSet, err error) {
statefulSets := c.AppsV1beta1().StatefulSets(namespace)
statefulSets := c.AppsV1().StatefulSets(namespace)
var updateErr error
pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if statefulSet, err = statefulSets.Get(name, metav1.GetOptions{}); err != nil {

View File

@ -26,6 +26,7 @@ import (
"github.com/golang/glog"
"github.com/onsi/ginkgo/config"
"github.com/spf13/viper"
utilflag "k8s.io/apiserver/pkg/util/flag"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@ -57,14 +58,17 @@ type TestContextType struct {
Prefix string
MinStartupPods int
// Timeout for waiting for system pods to be running
SystemPodsStartupTimeout time.Duration
UpgradeTarget string
EtcdUpgradeStorage string
EtcdUpgradeVersion string
UpgradeImage string
GCEUpgradeScript string
ContainerRuntime string
ContainerRuntimeEndpoint string
SystemPodsStartupTimeout time.Duration
UpgradeTarget string
EtcdUpgradeStorage string
EtcdUpgradeVersion string
IngressUpgradeImage string
UpgradeImage string
GCEUpgradeScript string
ContainerRuntime string
ContainerRuntimeEndpoint string
ContainerRuntimeProcessName string
ContainerRuntimePidFile string
// SystemdServices are comma separated list of systemd services the test framework
// will dump logs for.
SystemdServices string
@ -83,6 +87,7 @@ type TestContextType struct {
GatherLogsSizes bool
GatherMetricsAfterTest string
GatherSuiteMetricsAfterTest bool
AllowGatheringProfiles bool
// If set to 'true' framework will gather ClusterAutoscaler metrics when gathering them for other components.
IncludeClusterAutoscalerMetrics bool
// Currently supported values are 'hr' for human-readable and 'json'. It's a comma separated list.
@ -101,8 +106,8 @@ type TestContextType struct {
LogexporterGCSPath string
// If the garbage collector is enabled in the kube-apiserver and kube-controller-manager.
GarbageCollectorEnabled bool
// FeatureGates is a set of key=value pairs that describe feature gates for alpha/experimental features.
FeatureGates string
// featureGates is a map of feature names to bools that enable or disable alpha/experimental features.
FeatureGates map[string]bool
// Node e2e specific test context
NodeTestContextType
// Monitoring solution that is used in current cluster.
@ -188,6 +193,7 @@ func RegisterCommonFlags() {
flag.BoolVar(&TestContext.GatherLogsSizes, "gather-logs-sizes", false, "If set to true framework will be monitoring logs sizes on all machines running e2e tests.")
flag.StringVar(&TestContext.GatherMetricsAfterTest, "gather-metrics-at-teardown", "false", "If set to 'true' framework will gather metrics from all components after each test. If set to 'master' only master component metrics would be gathered.")
flag.BoolVar(&TestContext.GatherSuiteMetricsAfterTest, "gather-suite-metrics-at-teardown", false, "If set to true framwork will gather metrics from all components after the whole test suite completes.")
flag.BoolVar(&TestContext.AllowGatheringProfiles, "allow-gathering-profiles", true, "If set to true framework will allow to gather CPU/memory allocation pprof profiles from the master.")
flag.BoolVar(&TestContext.IncludeClusterAutoscalerMetrics, "include-cluster-autoscaler", false, "If set to true, framework will include Cluster Autoscaler when gathering metrics.")
flag.StringVar(&TestContext.OutputPrintType, "output-print-type", "json", "Format in which summaries should be printed: 'hr' for human readable, 'json' for JSON ones.")
flag.BoolVar(&TestContext.DumpLogsOnFailure, "dump-logs-on-failure", true, "If set to true test will dump data about the namespace in which test was running.")
@ -200,10 +206,12 @@ func RegisterCommonFlags() {
flag.StringVar(&TestContext.Host, "host", "", fmt.Sprintf("The host, or apiserver, to connect to. Will default to %s if this argument and --kubeconfig are not set", defaultHost))
flag.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.")
flag.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
flag.StringVar(&TestContext.FeatureGates, "feature-gates", "", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
flag.Var(utilflag.NewMapStringBool(&TestContext.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
flag.StringVar(&TestContext.Viper, "viper-config", "e2e", "The name of the viper config i.e. 'e2e' will read values from 'e2e.json' locally. All e2e parameters are meant to be configurable by viper.")
flag.StringVar(&TestContext.ContainerRuntime, "container-runtime", "docker", "The container runtime of cluster VM instances (docker/rkt/remote).")
flag.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "", "The container runtime endpoint of cluster VM instances.")
flag.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "unix:///var/run/dockershim.sock", "The container runtime endpoint of cluster VM instances.")
flag.StringVar(&TestContext.ContainerRuntimeProcessName, "container-runtime-process-name", "dockerd", "The name of the container runtime process.")
flag.StringVar(&TestContext.ContainerRuntimePidFile, "container-runtime-pid-file", "/var/run/docker.pid", "The pid file of the container runtime.")
flag.StringVar(&TestContext.SystemdServices, "systemd-services", "docker", "The comma separated list of systemd services the framework will dump logs for.")
flag.StringVar(&TestContext.ImageServiceEndpoint, "image-service-endpoint", "", "The image service endpoint of cluster VM instances.")
flag.StringVar(&TestContext.DockershimCheckpointDir, "dockershim-checkpoint-dir", "/var/lib/dockershim/sandbox", "The directory for dockershim to store sandbox checkpoints.")
@ -221,7 +229,7 @@ func RegisterClusterFlags() {
flag.StringVar(&TestContext.KubeVolumeDir, "volume-dir", "/var/lib/kubelet", "Path to the directory containing the kubelet volumes.")
flag.StringVar(&TestContext.CertDir, "cert-dir", "", "Path to the directory containing the certs. Default is empty, which doesn't use certs.")
flag.StringVar(&TestContext.RepoRoot, "repo-root", "../../", "Root directory of kubernetes repository, for finding test files.")
flag.StringVar(&TestContext.Provider, "provider", "", "The name of the Kubernetes provider (gce, gke, local, vagrant, etc.)")
flag.StringVar(&TestContext.Provider, "provider", "", "The name of the Kubernetes provider (gce, gke, local, etc.)")
flag.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")
flag.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.")
flag.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.")
@ -254,6 +262,7 @@ func RegisterClusterFlags() {
flag.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
flag.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
flag.StringVar(&TestContext.UpgradeImage, "upgrade-image", "", "Image to upgrade to (e.g. 'container_vm' or 'gci') if doing an upgrade test.")
flag.StringVar(&TestContext.IngressUpgradeImage, "ingress-upgrade-image", "", "Image to upgrade to if doing an upgrade test for ingress.")
flag.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.")
flag.BoolVar(&TestContext.CleanStart, "clean-start", false, "If true, purge all namespaces except default and system before running tests. This serves to Cleanup test namespaces from failed/interrupted e2e runs in a long-lived cluster.")
flag.BoolVar(&TestContext.GarbageCollectorEnabled, "garbage-collector-enabled", true, "Set to true if the garbage collector is enabled in the kube-apiserver and kube-controller-manager, then some tests will rely on the garbage collector to delete dependent resources.")
@ -292,7 +301,7 @@ func ViperizeFlags() {
viper.AddConfigPath(".")
viper.ReadInConfig()
// TODO Consider wether or not we want to use overwriteFlagsWithViperConfig().
// TODO Consider whether or not we want to use overwriteFlagsWithViperConfig().
viper.Unmarshal(&TestContext)
AfterReadingAllFlags(&TestContext)

View File

@ -14,8 +14,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["timer_test.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/timer",
library = ":go_default_library",
embed = [":go_default_library"],
deps = ["//vendor/github.com/onsi/gomega:go_default_library"],
)

View File

@ -33,7 +33,6 @@ import (
"path"
"path/filepath"
"regexp"
goruntime "runtime"
"sort"
"strconv"
"strings"
@ -51,6 +50,7 @@ import (
. "github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"
apps "k8s.io/api/apps/v1"
batch "k8s.io/api/batch/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
@ -62,6 +62,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
@ -75,6 +76,7 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@ -87,17 +89,18 @@ import (
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/controller"
nodectlr "k8s.io/kubernetes/pkg/controller/node"
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubectl"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
sshutil "k8s.io/kubernetes/pkg/ssh"
"k8s.io/kubernetes/pkg/util/system"
taintutils "k8s.io/kubernetes/pkg/util/taints"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
testutil "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -154,9 +157,20 @@ const (
// How long claims have to become dynamically provisioned
ClaimProvisionTimeout = 5 * time.Minute
// When these values are updated, also update cmd/kubelet/app/options/options.go
currentPodInfraContainerImageName = "gcr.io/google_containers/pause"
currentPodInfraContainerImageVersion = "3.0"
// How long claims have to become bound
ClaimBindingTimeout = 3 * time.Minute
// How long claims have to become deleted
ClaimDeletingTimeout = 3 * time.Minute
// How long PVs have to become reclaimed
PVReclaimingTimeout = 3 * time.Minute
// How long PVs have to become bound
PVBindingTimeout = 3 * time.Minute
// How long PVs have to become deleted
PVDeletingTimeout = 3 * time.Minute
// How long a node is allowed to become "Ready" after it is restarted before
// the test is considered failed.
@ -230,13 +244,7 @@ func GetServerArchitecture(c clientset.Interface) string {
// GetPauseImageName fetches the pause image name for the same architecture as the apiserver.
func GetPauseImageName(c clientset.Interface) string {
return currentPodInfraContainerImageName + "-" + GetServerArchitecture(c) + ":" + currentPodInfraContainerImageVersion
}
// GetPauseImageNameForHostArch fetches the pause image name for the same architecture the test is running on.
// TODO: move this function to the test/utils
func GetPauseImageNameForHostArch() string {
return currentPodInfraContainerImageName + "-" + goruntime.GOARCH + ":" + currentPodInfraContainerImageVersion
return imageutils.GetE2EImageWithArch(imageutils.Pause, GetServerArchitecture(c))
}
func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) {
@ -331,6 +339,16 @@ func SkipUnlessProviderIs(supportedProviders ...string) {
}
}
func SkipUnlessMultizone(c clientset.Interface) {
zones, err := GetClusterZones(c)
if err != nil {
Skipf("Error listing cluster zones")
}
if zones.Len() <= 1 {
Skipf("Requires more than one zone")
}
}
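An illustrative call site for the new skip helper (a sketch, assuming a ginkgo test with a `framework.Framework` named `f`):

	It("should spread pods across zones", func() {
		SkipUnlessMultizone(f.ClientSet)
		// ... multizone-only assertions ...
	})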
func SkipUnlessClusterMonitoringModeIs(supportedMonitoring ...string) {
if !ClusterMonitoringModeIs(supportedMonitoring...) {
Skipf("Only next monitoring modes are supported %v (not %s)", supportedMonitoring, TestContext.ClusterMonitoringMode)
@ -892,6 +910,26 @@ func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.In
return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout)
}
// WaitForStatefulSetReplicasReady waits for all replicas of a StatefulSet to become ready or until timeout occurs, whichever comes first.
func WaitForStatefulSetReplicasReady(statefulSetName, ns string, c clientset.Interface, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for StatefulSet %s to have all replicas ready", timeout, statefulSetName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
	sts, err := c.AppsV1().StatefulSets(ns).Get(statefulSetName, metav1.GetOptions{})
	if err != nil {
		Logf("Get StatefulSet %s failed, ignoring for %v: %v", statefulSetName, Poll, err)
		continue
	}
	if sts.Status.ReadyReplicas == *sts.Spec.Replicas {
		Logf("All %d replicas of StatefulSet %s are ready. (%v)", sts.Status.ReadyReplicas, statefulSetName, time.Since(start))
		return nil
	}
	Logf("StatefulSet %s found but there are %d ready replicas and %d total replicas.", statefulSetName, sts.Status.ReadyReplicas, *sts.Spec.Replicas)
}
return fmt.Errorf("StatefulSet %s still has unready pods after %v", statefulSetName, timeout)
}
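A hypothetical call (the StatefulSet name, poll interval, and timeout below are assumptions, not values from this commit):

	// Poll every 10s, up to 10min, for StatefulSet "web" in namespace ns.
	ExpectNoError(WaitForStatefulSetReplicasReady("web", ns, c, 10*time.Second, 10*time.Minute))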
// WaitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName)
@ -1204,6 +1242,10 @@ func hasRemainingContent(c clientset.Interface, clientPool dynamic.ClientPool, n
if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
continue
}
// skip unavailable servers
if apierrs.IsServiceUnavailable(err) {
continue
}
return false, err
}
unstructuredList, ok := obj.(*unstructured.UnstructuredList)
@ -1551,6 +1593,28 @@ func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labe
})
}
// WaitForPodNameUnschedulableInNamespace returns an error if it takes too long for the pod to become Pending
// with a PodScheduled condition whose reason is Unschedulable,
// if the pod Get API returns an error (IsNotFound or other), or if the pod reaches the Running, Succeeded, or Failed phase.
// Typically called to test that the passed-in pod is Pending and Unschedulable.
func WaitForPodNameUnschedulableInNamespace(c clientset.Interface, podName, namespace string) error {
return WaitForPodCondition(c, namespace, podName, "Unschedulable", PodStartTimeout, func(pod *v1.Pod) (bool, error) {
// Only consider Failed pods. Successful pods will be deleted and detected in
// waitForPodCondition's Get call returning `IsNotFound`
if pod.Status.Phase == v1.PodPending {
for _, cond := range pod.Status.Conditions {
if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" {
return true, nil
}
}
}
if pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
return true, fmt.Errorf("Expected pod %q in namespace %q to be in phase Pending, but got phase: %v", podName, namespace, pod.Status.Phase)
}
return false, nil
})
}
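A sketch of the intended flow: create a pod that cannot schedule, then assert it stays Pending/Unschedulable. The helper `newUnschedulablePod` is hypothetical, not part of this commit:

	// newUnschedulablePod is assumed to build a pod with, e.g., an impossible
	// resource request so that no node can accept it.
	pod, err := c.CoreV1().Pods(ns).Create(newUnschedulablePod("oversized"))
	ExpectNoError(err)
	ExpectNoError(WaitForPodNameUnschedulableInNamespace(c, pod.Name, ns))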
// WaitForService waits until the service appears (exist == true), or disappears (exist == false)
func WaitForService(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
@ -1649,7 +1713,7 @@ func WaitForEndpoint(c clientset.Interface, ns, name string) error {
}
// Context for checking pods responses by issuing GETs to them (via the API
// proxy) and verifying that they answer with ther own pod name.
// proxy) and verifying that they answer with their own pod name.
type podProxyResponseChecker struct {
c clientset.Interface
ns string
@ -2130,8 +2194,8 @@ func (b kubectlBuilder) Exec() (string, error) {
if err != nil {
var rc int = 127
if ee, ok := err.(*exec.ExitError); ok {
Logf("rc: %d", rc)
rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus())
Logf("rc: %d", rc)
}
return "", uexec.CodeExitError{
Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err),
@ -2161,6 +2225,26 @@ func RunKubectlOrDieInput(data string, args ...string) string {
return NewKubectlCommand(args...).WithStdinData(data).ExecOrDie()
}
// runKubemciWithKubeconfig is a convenience wrapper over runKubemciCmd
func runKubemciWithKubeconfig(args ...string) (string, error) {
if TestContext.KubeConfig != "" {
args = append(args, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig)
}
return runKubemciCmd(args...)
}
// runKubemciCmd is a convenience wrapper over kubectlBuilder to run kubemci.
// It assumes that kubemci exists in PATH.
func runKubemciCmd(args ...string) (string, error) {
// kubemci is assumed to be in PATH.
kubemci := "kubemci"
b := new(kubectlBuilder)
args = append(args, "--gcp-project="+TestContext.CloudConfig.ProjectID)
b.cmd = exec.Command(kubemci, args...)
return b.Exec()
}
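A sketch of invoking kubemci through the wrapper (the "list" subcommand is an assumption about the kubemci CLI; --gcp-project is appended by runKubemciCmd):

	out, err := runKubemciWithKubeconfig("list")
	if err != nil {
		Failf("kubemci list failed: %v", err)
	}
	Logf("kubemci output: %s", out)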
func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) {
stdout, err = cmd.StdoutPipe()
if err != nil {
@ -2687,26 +2771,19 @@ func RemoveAvoidPodsOffNode(c clientset.Interface, nodeName string) {
ExpectNoError(err)
}
func getScalerForKind(internalClientset internalclientset.Interface, kind schema.GroupKind) (kubectl.Scaler, error) {
return kubectl.ScalerFor(kind, internalClientset)
}
func ScaleResource(
clientset clientset.Interface,
internalClientset internalclientset.Interface,
scalesGetter scaleclient.ScalesGetter,
ns, name string,
size uint,
wait bool,
kind schema.GroupKind,
gr schema.GroupResource,
) error {
By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size))
scaler, err := getScalerForKind(internalClientset, kind)
if err != nil {
return err
}
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
if err = scaler.Scale(ns, name, size, nil, waitForScale, waitForReplicas); err != nil {
scaler := kubectl.ScalerFor(kind, internalClientset.Batch(), scalesGetter, gr)
if err := testutil.ScaleResourceWithRetries(scaler, ns, name, size); err != nil {
return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err)
}
if !wait {
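A hypothetical call with the new signature; the Deployment GroupKind/GroupResource pair below is an assumption about the caller's target type, and `scales` is a scaleclient.ScalesGetter from the test setup:

	err := ScaleResource(c, internalClient, scales, ns, "my-deploy", 5, true,
		schema.GroupKind{Group: "extensions", Kind: "Deployment"},
		schema.GroupResource{Group: "extensions", Resource: "deployments"})
	ExpectNoError(err)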
@ -3130,10 +3207,10 @@ func WaitForPartialEvents(c clientset.Interface, ns string, objOrRef runtime.Obj
})
}
type updateDSFunc func(*extensions.DaemonSet)
type updateDSFunc func(*apps.DaemonSet)
func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *extensions.DaemonSet, err error) {
daemonsets := c.ExtensionsV1beta1().DaemonSets(namespace)
func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *apps.DaemonSet, err error) {
daemonsets := c.AppsV1().DaemonSets(namespace)
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if ds, err = daemonsets.Get(name, metav1.GetOptions{}); err != nil {
@ -3459,12 +3536,6 @@ func GetSigner(provider string) (ssh.Signer, error) {
}
// Otherwise revert to home dir
keyfile = "kube_aws_rsa"
case "vagrant":
keyfile = os.Getenv("VAGRANT_SSH_KEY")
if len(keyfile) != 0 {
return sshutil.MakePrivateKeySignerFromFile(keyfile)
}
return nil, fmt.Errorf("VAGRANT_SSH_KEY env variable should be provided")
case "local", "vsphere":
keyfile = os.Getenv("LOCAL_SSH_KEY") // maybe?
if len(keyfile) == 0 {
@ -3905,9 +3976,7 @@ func sshRestartMaster() error {
}
var command string
if ProviderIs("gce") {
// `kube-apiserver_kube-apiserver` matches the name of the apiserver
// container.
command = "sudo docker ps | grep kube-apiserver_kube-apiserver | cut -d ' ' -f 1 | xargs sudo docker kill"
command = "pidof kube-apiserver | xargs sudo kill"
} else {
command = "sudo /etc/init.d/kube-apiserver restart"
}
@ -3938,9 +4007,9 @@ func RestartControllerManager() error {
if ProviderIs("gce") && !MasterOSDistroIs("gci") {
return fmt.Errorf("unsupported master OS distro: %s", TestContext.MasterOSDistro)
}
cmd := "sudo docker ps | grep k8s_kube-controller-manager | cut -d ' ' -f 1 | xargs sudo docker kill"
cmd := "pidof kube-controller-manager | xargs sudo kill"
Logf("Restarting controller-manager via ssh, running: %v", cmd)
result, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("couldn't restart controller-manager: %v", err)
@ -3949,9 +4018,9 @@ func RestartControllerManager() error {
}
func WaitForControllerManagerUp() error {
cmd := "curl http://localhost:" + strconv.Itoa(ports.ControllerManagerPort) + "/healthz"
cmd := "curl http://localhost:" + strconv.Itoa(ports.InsecureKubeControllerManagerPort) + "/healthz"
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
result, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
}
@ -3965,9 +4034,9 @@ func WaitForControllerManagerUp() error {
// CheckForControllerManagerHealthy checks that the controller manager does not crash within "duration"
func CheckForControllerManagerHealthy(duration time.Duration) error {
var PID string
cmd := "sudo docker ps | grep k8s_kube-controller-manager | cut -d ' ' -f 1"
cmd := "pidof kube-controller-manager"
for start := time.Now(); time.Since(start) < duration; time.Sleep(5 * time.Second) {
result, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
if err != nil {
// We don't necessarily know that it crashed, pipe could just be broken
LogSSHResult(result)
@ -4029,7 +4098,7 @@ func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) e
Logf("Cluster has reached the desired number of ready nodes %d", size)
return nil
}
Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numNodes, numNodes-numReady)
Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numReady, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for number of ready nodes to be %d", timeout, size)
}
@ -4153,42 +4222,6 @@ func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []st
return websocket.DialConfig(cfg)
}
// getIngressAddress returns the ips/hostnames associated with the Ingress.
func getIngressAddress(client clientset.Interface, ns, name string) ([]string, error) {
ing, err := client.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
addresses := []string{}
for _, a := range ing.Status.LoadBalancer.Ingress {
if a.IP != "" {
addresses = append(addresses, a.IP)
}
if a.Hostname != "" {
addresses = append(addresses, a.Hostname)
}
}
return addresses, nil
}
// WaitForIngressAddress waits for the Ingress to acquire an address.
func WaitForIngressAddress(c clientset.Interface, ns, ingName string, timeout time.Duration) (string, error) {
var address string
err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
ipOrNameList, err := getIngressAddress(c, ns, ingName)
if err != nil || len(ipOrNameList) == 0 {
Logf("Waiting for Ingress %v to acquire IP, error %v", ingName, err)
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
address = ipOrNameList[0]
return true, nil
})
return address, err
}
// Looks for the given string in the log of a specific pod container
func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
@ -4340,7 +4373,7 @@ func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error {
}
return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
service := gceCloud.GetComputeService()
service := gceCloud.ComputeServices().GA
list, err := service.ForwardingRules.List(project, region).Do()
if err != nil {
return false, err
@ -4810,6 +4843,22 @@ func (p *E2ETestNodePreparer) CleanupNodes() error {
return encounteredError
}
func GetClusterID(c clientset.Interface) (string, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{})
if err != nil || cm == nil {
return "", fmt.Errorf("error getting cluster ID: %v", err)
}
clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster]
providerID, providerIDExists := cm.Data[gcecloud.UIDProvider]
if !clusterIDExists {
return "", fmt.Errorf("cluster ID not set")
}
if providerIDExists {
return providerID, nil
}
return clusterID, nil
}
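A minimal sketch of a caller (the logging is illustrative; GetClusterID reads the GCE UID ConfigMap, so this only works on clusters where that ConfigMap exists):

	clusterID, err := GetClusterID(c)
	ExpectNoError(err)
	Logf("cluster ID: %s", clusterID)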
// CleanupGCEResources cleans up GCE Service Type=LoadBalancer resources with
// the given name. The name is usually the UUID of the Service prefixed with an
// alpha-numeric character ('a') to work around cloudprovider rules.
@ -4960,6 +5009,7 @@ func PollURL(route, host string, timeout time.Duration, interval time.Duration,
Logf("host %v path %v: %v unreachable", host, route, err)
return expectUnreachable, nil
}
Logf("host %v path %v: reached", host, route)
return !expectUnreachable, nil
})
if pollErr != nil {
@ -5059,8 +5109,9 @@ func DumpDebugInfo(c clientset.Interface, ns string) {
}
}
// TODO: Get rid of this duplicate function in favour of the one in test/utils.
func IsRetryableAPIError(err error) bool {
return apierrs.IsTimeout(err) || apierrs.IsServerTimeout(err) || apierrs.IsTooManyRequests(err) || apierrs.IsInternalError(err)
return apierrs.IsTimeout(err) || apierrs.IsServerTimeout(err) || apierrs.IsTooManyRequests(err) || utilnet.IsProbableEOF(err)
}
// DsFromManifest reads a .json/yaml file and returns the daemonset in it.
@ -5125,3 +5176,35 @@ func waitForServerPreferredNamespacedResources(d discovery.DiscoveryInterface, t
}
return resources, nil
}
// WaitForPersistentVolumeClaimDeleted waits for a PersistentVolumeClaim to be removed from the system, or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolumeClaim %s to be removed", timeout, pvcName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
_, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns)
return nil
}
Logf("Failed to get claim %q in namespace %q, retrying in %v. Error: %v", pvcName, ns, Poll, err)
}
}
return fmt.Errorf("PersistentVolumeClaim %s is not removed from the system within %v", pvcName, timeout)
}
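A sketch of the delete-then-wait pattern this helper supports (poll interval and timeout below are assumptions):

	// Delete the claim, then block until the API stops returning it.
	ExpectNoError(c.CoreV1().PersistentVolumeClaims(ns).Delete(pvcName, nil))
	ExpectNoError(WaitForPersistentVolumeClaimDeleted(c, ns, pvcName, 2*time.Second, 2*time.Minute))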
func GetClusterZones(c clientset.Interface) (sets.String, error) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
}
// collect values of zone label from all nodes
zones := sets.NewString()
for _, node := range nodes.Items {
if zone, found := node.Labels[kubeletapis.LabelZoneFailureDomain]; found {
zones.Insert(zone)
}
}
return zones, nil
}
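GetClusterZones backs SkipUnlessMultizone above, but it can also be used directly when a test needs the zone names themselves; a minimal sketch:

	zones, err := GetClusterZones(c)
	ExpectNoError(err)
	Logf("cluster spans zones: %v", zones.List())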

View File

@ -48,21 +48,13 @@ import (
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Current supported images for e2e volume testing to be assigned to VolumeTestConfig.serverImage
const (
NfsServerImage string = "gcr.io/google_containers/volume-nfs:0.8"
IscsiServerImage string = "gcr.io/google_containers/volume-iscsi:0.1"
GlusterfsServerImage string = "gcr.io/google_containers/volume-gluster:0.2"
CephServerImage string = "gcr.io/google_containers/volume-ceph:0.1"
RbdServerImage string = "gcr.io/google_containers/volume-rbd:0.1"
)
const (
Kb int64 = 1000
Mb int64 = 1000 * Kb
@ -72,6 +64,9 @@ const (
MiB int64 = 1024 * KiB
GiB int64 = 1024 * MiB
TiB int64 = 1024 * GiB
// Waiting period for volume server (Ceph, ...) to initialize itself.
VolumeServerPodStartupSleep = 20 * time.Second
)
// Configuration of one test. The test consists of:
@ -91,6 +86,7 @@ type VolumeTestConfig struct {
ServerArgs []string
// Volumes needed to be mounted to the server container from the host
// map <host (source) path> -> <container (dst.) path>
// if <host (source) path> is empty, mount a tmpfs emptydir
ServerVolumes map[string]string
// Wait for the pod to terminate successfully
// False indicates that the pod is long running
@ -114,10 +110,11 @@ type VolumeTest struct {
// NFS-specific wrapper for CreateStorageServer.
func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
config = VolumeTestConfig{
Namespace: namespace,
Prefix: "nfs",
ServerImage: NfsServerImage,
ServerPorts: []int{2049},
Namespace: namespace,
Prefix: "nfs",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeNFSServer),
ServerPorts: []int{2049},
ServerVolumes: map[string]string{"": "/exports"},
}
if len(args) > 0 {
config.ServerArgs = args
@ -131,7 +128,7 @@ func NewGlusterfsServer(cs clientset.Interface, namespace string) (config Volume
config = VolumeTestConfig{
Namespace: namespace,
Prefix: "gluster",
ServerImage: GlusterfsServerImage,
ServerImage: imageutils.GetE2EImage(imageutils.VolumeGlusterServer),
ServerPorts: []int{24007, 24008, 49152},
}
pod, ip = CreateStorageServer(cs, config)
@ -173,7 +170,7 @@ func NewISCSIServer(cs clientset.Interface, namespace string) (config VolumeTest
config = VolumeTestConfig{
Namespace: namespace,
Prefix: "iscsi",
ServerImage: IscsiServerImage,
ServerImage: imageutils.GetE2EImage(imageutils.VolumeISCSIServer),
ServerPorts: []int{3260},
ServerVolumes: map[string]string{
// iSCSI container needs to insert modules from the host
@ -189,13 +186,21 @@ func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestCo
config = VolumeTestConfig{
Namespace: namespace,
Prefix: "rbd",
ServerImage: RbdServerImage,
ServerImage: imageutils.GetE2EImage(imageutils.VolumeRBDServer),
ServerPorts: []int{6789},
ServerVolumes: map[string]string{
"/lib/modules": "/lib/modules",
},
}
pod, ip = CreateStorageServer(cs, config)
// Ceph server container needs some time to start. Tests continue working if
// this sleep is removed; however, kubelet logs (and kubectl describe
// <client pod>) would be cluttered with error messages about a non-existent
// image.
Logf("sleeping a bit to give ceph server time to initialize")
time.Sleep(VolumeServerPodStartupSleep)
return config, pod, ip
}
@ -238,8 +243,12 @@ func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.
for src, dst := range config.ServerVolumes {
mountName := fmt.Sprintf("path%d", i)
volumes[i].Name = mountName
volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
Path: src,
if src == "" {
volumes[i].VolumeSource.EmptyDir = &v1.EmptyDirVolumeSource{}
} else {
volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
Path: src,
}
}
mounts[i].Name = mountName
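An illustrative ServerVolumes map under the new convention (values here are examples only): an empty source key becomes a tmpfs emptyDir, a non-empty one a hostPath mount.

	config.ServerVolumes = map[string]string{
		"":             "/exports",     // emptyDir inside the server pod
		"/lib/modules": "/lib/modules", // hostPath from the node
	}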
@ -432,7 +441,7 @@ func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGro
if fsGroup != nil {
By("Checking fsGroup is correct.")
_, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"ls", "-ld", "/opt/0"}, strconv.Itoa(int(*fsGroup)), time.Minute)
Expect(err).NotTo(HaveOccurred(), "failed: getting the right priviliges in the file %v", int(*fsGroup))
Expect(err).NotTo(HaveOccurred(), "failed: getting the right privileges in the file %v", int(*fsGroup))
}
}