vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

vendor/k8s.io/kubernetes/test/e2e/scalability/BUILD generated vendored Normal file

@@ -0,0 +1,60 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"density.go",
"empty.go",
"framework.go",
"load.go",
],
importpath = "k8s.io/kubernetes/test/e2e/scalability",
deps = [
"//pkg/apis/batch:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/timer:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/transport:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/test/e2e/scalability/OWNERS generated vendored Normal file

@@ -0,0 +1,9 @@
approvers:
- gmarek
- shyamjvs
- wojtek-t
reviewers:
- gmarek
- shyamjvs
- timothysc
- wojtek-t

vendor/k8s.io/kubernetes/test/e2e/scalability/density.go generated vendored Normal file

@@ -0,0 +1,890 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scalability
import (
"fmt"
"math"
"os"
"sort"
"strconv"
"sync"
"time"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
utiluuid "k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/apis/batch"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/timer"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
MinSaturationThreshold = 2 * time.Minute
MinPodsPerSecondThroughput = 8
DensityPollInterval = 10 * time.Second
MaxLatencyPodCreationTries = 5
)
// Maximum container failures this test tolerates before failing.
var MaxContainerFailures = 0
// Maximum no. of missing measurements related to pod-startup that the test tolerates.
var MaxMissingPodStartupMeasurements = 0
type DensityTestConfig struct {
Configs []testutils.RunObjectConfig
ClientSets []clientset.Interface
InternalClientsets []internalclientset.Interface
PollInterval time.Duration
PodCount int
// What kind of resource we want to create
kind schema.GroupKind
SecretConfigs []*testutils.SecretConfig
ConfigMapConfigs []*testutils.ConfigMapConfig
DaemonConfigs []*testutils.DaemonConfig
}
func (dtc *DensityTestConfig) runSecretConfigs(testPhase *timer.Phase) {
defer testPhase.End()
for _, sc := range dtc.SecretConfigs {
sc.Run()
}
}
func (dtc *DensityTestConfig) runConfigMapConfigs(testPhase *timer.Phase) {
defer testPhase.End()
for _, cmc := range dtc.ConfigMapConfigs {
cmc.Run()
}
}
func (dtc *DensityTestConfig) runDaemonConfigs(testPhase *timer.Phase) {
defer testPhase.End()
for _, dc := range dtc.DaemonConfigs {
dc.Run()
}
}
func (dtc *DensityTestConfig) deleteSecrets(testPhase *timer.Phase) {
defer testPhase.End()
for i := range dtc.SecretConfigs {
dtc.SecretConfigs[i].Stop()
}
}
func (dtc *DensityTestConfig) deleteConfigMaps(testPhase *timer.Phase) {
defer testPhase.End()
for i := range dtc.ConfigMapConfigs {
dtc.ConfigMapConfigs[i].Stop()
}
}
func (dtc *DensityTestConfig) deleteDaemonSets(numberOfClients int, testPhase *timer.Phase) {
defer testPhase.End()
for i := range dtc.DaemonConfigs {
framework.ExpectNoError(framework.DeleteResourceAndPods(
dtc.ClientSets[i%numberOfClients],
dtc.InternalClientsets[i%numberOfClients],
extensions.Kind("DaemonSet"),
dtc.DaemonConfigs[i].Namespace,
dtc.DaemonConfigs[i].Name,
))
}
}
func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
var apiserverMem uint64
var controllerMem uint64
var schedulerMem uint64
apiserverCPU := math.MaxFloat32
apiserverMem = math.MaxUint64
controllerCPU := math.MaxFloat32
controllerMem = math.MaxUint64
schedulerCPU := math.MaxFloat32
schedulerMem = math.MaxUint64
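// The defaults above are effectively "no limit" (MaxFloat32 / MaxUint64); they are
// tightened below only for providers and cluster sizes with known expected usage.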
framework.Logf("Setting resource constraings for provider: %s", framework.TestContext.Provider)
if framework.ProviderIs("kubemark") {
if numNodes <= 5 {
apiserverCPU = 0.35
apiserverMem = 150 * (1024 * 1024)
controllerCPU = 0.15
controllerMem = 100 * (1024 * 1024)
schedulerCPU = 0.05
schedulerMem = 50 * (1024 * 1024)
} else if numNodes <= 100 {
apiserverCPU = 1.5
apiserverMem = 1500 * (1024 * 1024)
controllerCPU = 0.5
controllerMem = 500 * (1024 * 1024)
schedulerCPU = 0.4
schedulerMem = 180 * (1024 * 1024)
} else if numNodes <= 500 {
apiserverCPU = 3.5
apiserverMem = 3400 * (1024 * 1024)
controllerCPU = 1.3
controllerMem = 1100 * (1024 * 1024)
schedulerCPU = 1.5
schedulerMem = 500 * (1024 * 1024)
} else if numNodes <= 1000 {
apiserverCPU = 5.5
apiserverMem = 4000 * (1024 * 1024)
controllerCPU = 3
controllerMem = 2000 * (1024 * 1024)
schedulerCPU = 1.5
schedulerMem = 750 * (1024 * 1024)
}
} else {
if numNodes <= 100 {
apiserverCPU = 1.8
apiserverMem = 1500 * (1024 * 1024)
controllerCPU = 0.5
controllerMem = 500 * (1024 * 1024)
schedulerCPU = 0.4
schedulerMem = 180 * (1024 * 1024)
}
}
constraints := make(map[string]framework.ResourceConstraint)
constraints["fluentd-elasticsearch"] = framework.ResourceConstraint{
CPUConstraint: 0.2,
MemoryConstraint: 250 * (1024 * 1024),
}
constraints["elasticsearch-logging"] = framework.ResourceConstraint{
CPUConstraint: 2,
// TODO: bring it down to 750MB again, when we lower Kubelet verbosity level. I.e. revert #19164
MemoryConstraint: 5000 * (1024 * 1024),
}
constraints["heapster"] = framework.ResourceConstraint{
CPUConstraint: 2,
MemoryConstraint: 1800 * (1024 * 1024),
}
constraints["kibana-logging"] = framework.ResourceConstraint{
CPUConstraint: 0.2,
MemoryConstraint: 100 * (1024 * 1024),
}
constraints["kube-proxy"] = framework.ResourceConstraint{
CPUConstraint: 0.15,
MemoryConstraint: 100 * (1024 * 1024),
}
constraints["l7-lb-controller"] = framework.ResourceConstraint{
CPUConstraint: 0.2 + 0.00015*float64(numNodes),
MemoryConstraint: (75 + uint64(math.Ceil(0.8*float64(numNodes)))) * (1024 * 1024),
}
constraints["influxdb"] = framework.ResourceConstraint{
CPUConstraint: 2,
MemoryConstraint: 500 * (1024 * 1024),
}
constraints["kube-apiserver"] = framework.ResourceConstraint{
CPUConstraint: apiserverCPU,
MemoryConstraint: apiserverMem,
}
constraints["kube-controller-manager"] = framework.ResourceConstraint{
CPUConstraint: controllerCPU,
MemoryConstraint: controllerMem,
}
constraints["kube-scheduler"] = framework.ResourceConstraint{
CPUConstraint: schedulerCPU,
MemoryConstraint: schedulerMem,
}
return constraints
}
func logPodStartupStatus(c clientset.Interface, expectedPods int, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) {
label := labels.SelectorFromSet(labels.Set(observedLabels))
podStore := testutils.NewPodStore(c, metav1.NamespaceAll, label, fields.Everything())
defer podStore.Stop()
ticker := time.NewTicker(period)
defer ticker.Stop()
for {
select {
case <-ticker.C:
pods := podStore.List()
startupStatus := testutils.ComputeRCStartupStatus(pods, expectedPods)
framework.Logf(startupStatus.String("Density"))
case <-stopCh:
pods := podStore.List()
startupStatus := testutils.ComputeRCStartupStatus(pods, expectedPods)
framework.Logf(startupStatus.String("Density"))
return
}
}
}
// runDensityTest will perform a density test and return the time it took for
// all pods to start
func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer) time.Duration {
defer GinkgoRecover()
// Create all secrets, configmaps and daemons.
dtc.runSecretConfigs(testPhaseDurations.StartPhase(250, "secrets creation"))
dtc.runConfigMapConfigs(testPhaseDurations.StartPhase(260, "configmaps creation"))
dtc.runDaemonConfigs(testPhaseDurations.StartPhase(270, "daemonsets creation"))
replicationCtrlStartupPhase := testPhaseDurations.StartPhase(300, "saturation pods creation")
defer replicationCtrlStartupPhase.End()
// Start all replication controllers.
startTime := time.Now()
wg := sync.WaitGroup{}
wg.Add(len(dtc.Configs))
for i := range dtc.Configs {
config := dtc.Configs[i]
go func() {
defer GinkgoRecover()
// Call wg.Done() in defer to avoid blocking whole test
// in case of error from RunRC.
defer wg.Done()
framework.ExpectNoError(config.Run())
}()
}
logStopCh := make(chan struct{})
go logPodStartupStatus(dtc.ClientSets[0], dtc.PodCount, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
wg.Wait()
startupTime := time.Now().Sub(startTime)
close(logStopCh)
framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
replicationCtrlStartupPhase.End()
printPodAllocationPhase := testPhaseDurations.StartPhase(400, "printing pod allocation")
defer printPodAllocationPhase.End()
// Print some data about Pod to Node allocation
By("Printing Pod to Node allocation data")
podList, err := dtc.ClientSets[0].CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
framework.ExpectNoError(err)
pausePodAllocation := make(map[string]int)
systemPodAllocation := make(map[string][]string)
for _, pod := range podList.Items {
if pod.Namespace == metav1.NamespaceSystem {
systemPodAllocation[pod.Spec.NodeName] = append(systemPodAllocation[pod.Spec.NodeName], pod.Name)
} else {
pausePodAllocation[pod.Spec.NodeName]++
}
}
nodeNames := make([]string, 0)
for k := range pausePodAllocation {
nodeNames = append(nodeNames, k)
}
sort.Strings(nodeNames)
for _, node := range nodeNames {
framework.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
}
defer printPodAllocationPhase.End()
return startupTime
}
func cleanupDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer) {
defer GinkgoRecover()
podCleanupPhase := testPhaseDurations.StartPhase(900, "latency pods deletion")
defer podCleanupPhase.End()
By("Deleting created Collections")
numberOfClients := len(dtc.ClientSets)
// We explicitly delete all pods to have API calls necessary for deletion accounted in metrics.
for i := range dtc.Configs {
name := dtc.Configs[i].GetName()
namespace := dtc.Configs[i].GetNamespace()
kind := dtc.Configs[i].GetKind()
if framework.TestContext.GarbageCollectorEnabled && kindSupportsGarbageCollector(kind) {
By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind))
err := framework.DeleteResourceAndWaitForGC(dtc.ClientSets[i%numberOfClients], kind, namespace, name)
framework.ExpectNoError(err)
} else {
By(fmt.Sprintf("Cleaning up the %v and pods", kind))
err := framework.DeleteResourceAndPods(dtc.ClientSets[i%numberOfClients], dtc.InternalClientsets[i%numberOfClients], kind, namespace, name)
framework.ExpectNoError(err)
}
}
podCleanupPhase.End()
dtc.deleteSecrets(testPhaseDurations.StartPhase(910, "secrets deletion"))
dtc.deleteConfigMaps(testPhaseDurations.StartPhase(920, "configmaps deletion"))
dtc.deleteDaemonSets(numberOfClients, testPhaseDurations.StartPhase(930, "daemonsets deletion"))
}
// This test suite can take a long time to run, and can affect or be affected by other tests.
// So by default it is added to the ginkgo.skip list (see driver.go).
// To run this suite you must explicitly ask for it by setting the
// -t/--test flag or ginkgo.focus flag.
// IMPORTANT: This test is designed to work on large (>= 100 Nodes) clusters. For smaller ones
// results will not be representative for control-plane performance as we'll start hitting
// limits on Docker's concurrent container startup.
var _ = SIGDescribe("Density", func() {
var c clientset.Interface
var nodeCount int
var additionalPodsPrefix string
var ns string
var uuid string
var e2eStartupTime time.Duration
var totalPods int
var nodeCpuCapacity int64
var nodeMemCapacity int64
var nodes *v1.NodeList
var masters sets.String
testCaseBaseName := "density"
missingMeasurements := 0
var testPhaseDurations *timer.TestPhaseTimer
// Gathers data prior to framework namespace teardown
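// As an illustration of the saturation check below: for a 100-node run at
// 30 pods/node (totalPods = 3000) and MinPodsPerSecondThroughput = 8, the
// threshold is 3000/8 = 375s (with MinSaturationThreshold, 2 minutes, as the floor),
// and e2eStartupTime must not exceed it.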
AfterEach(func() {
saturationThreshold := time.Duration((totalPods / MinPodsPerSecondThroughput)) * time.Second
if saturationThreshold < MinSaturationThreshold {
saturationThreshold = MinSaturationThreshold
}
Expect(e2eStartupTime).NotTo(BeNumerically(">", saturationThreshold))
saturationData := framework.SaturationTime{
TimeToSaturate: e2eStartupTime,
NumberOfNodes: nodeCount,
NumberOfPods: totalPods,
Throughput: float32(totalPods) / float32(e2eStartupTime/time.Second),
}
framework.Logf("Cluster saturation time: %s", framework.PrettyPrintJSON(saturationData))
summaries := make([]framework.TestDataSummary, 0, 2)
// Verify latency metrics.
highLatencyRequests, metrics, err := framework.HighLatencyRequests(c, nodeCount)
framework.ExpectNoError(err)
if err == nil {
summaries = append(summaries, metrics)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
}
// Verify scheduler metrics.
// TODO: Reset metrics at the beginning of the test.
// We should do something similar to how we do it for APIserver.
latency, err := framework.VerifySchedulerLatency(c)
framework.ExpectNoError(err)
if err == nil {
summaries = append(summaries, latency)
}
summaries = append(summaries, testPhaseDurations)
framework.PrintSummaries(summaries, testCaseBaseName)
// Fail if more than the allowed threshold of measurements were missing in the latencyTest.
Expect(missingMeasurements <= MaxMissingPodStartupMeasurements).To(Equal(true))
})
options := framework.FrameworkOptions{
ClientQPS: 50.0,
ClientBurst: 100,
}
// Explicitly put here, to delete namespace at the end of the test
// (after measuring latency metrics, etc.).
f := framework.NewFramework(testCaseBaseName, options, nil)
f.NamespaceDeletionTimeout = time.Hour
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
testPhaseDurations = timer.NewTestPhaseTimer()
masters, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
nodeCpuCapacity = nodes.Items[0].Status.Allocatable.Cpu().MilliValue()
nodeMemCapacity = nodes.Items[0].Status.Allocatable.Memory().Value()
// Terminating a namespace (deleting the remaining objects from it - which
// generally means events) can affect the current run. Thus we wait for all
// terminating namespaces to be fully deleted before starting this test.
err := framework.CheckTestingNSDeletedExcept(c, ns)
framework.ExpectNoError(err)
uuid = string(utiluuid.NewUUID())
framework.ExpectNoError(framework.ResetMetrics(c))
framework.ExpectNoError(os.Mkdir(fmt.Sprintf(framework.TestContext.OutputDir+"/%s", uuid), 0777))
framework.Logf("Listing nodes for easy debugging:\n")
for _, node := range nodes.Items {
var internalIP, externalIP string
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeInternalIP {
internalIP = address.Address
}
if address.Type == v1.NodeExternalIP {
externalIP = address.Address
}
}
framework.Logf("Name: %v, clusterIP: %v, externalIP: %v", node.ObjectMeta.Name, internalIP, externalIP)
}
})
type Density struct {
// Controls if e2e latency tests should be run (they are slow)
runLatencyTest bool
podsPerNode int
// Controls how often the apiserver is polled for pods
interval time.Duration
// What kind of resource we should be creating. Default: ReplicationController
kind schema.GroupKind
secretsPerPod int
configMapsPerPod int
daemonsPerNode int
}
densityTests := []Density{
// TODO: Expose runLatencyTest as ginkgo flag.
{podsPerNode: 3, runLatencyTest: false, kind: api.Kind("ReplicationController")},
{podsPerNode: 30, runLatencyTest: true, kind: api.Kind("ReplicationController")},
{podsPerNode: 50, runLatencyTest: false, kind: api.Kind("ReplicationController")},
{podsPerNode: 95, runLatencyTest: true, kind: api.Kind("ReplicationController")},
{podsPerNode: 100, runLatencyTest: false, kind: api.Kind("ReplicationController")},
// Tests for other resource types:
{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment")},
{podsPerNode: 30, runLatencyTest: true, kind: batch.Kind("Job")},
// Test scheduling when daemons are present
{podsPerNode: 30, runLatencyTest: true, kind: api.Kind("ReplicationController"), daemonsPerNode: 2},
// Test with secrets
{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment"), secretsPerPod: 2},
// Test with configmaps
{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment"), configMapsPerPod: 2},
}
for _, testArg := range densityTests {
feature := "ManualPerformance"
switch testArg.podsPerNode {
case 30:
if testArg.kind == api.Kind("ReplicationController") && testArg.daemonsPerNode == 0 && testArg.secretsPerPod == 0 && testArg.configMapsPerPod == 0 {
feature = "Performance"
}
case 95:
feature = "HighDensityPerformance"
}
name := fmt.Sprintf("[Feature:%s] should allow starting %d pods per node using %v with %v secrets, %v configmaps and %v daemons",
feature,
testArg.podsPerNode,
testArg.kind,
testArg.secretsPerPod,
testArg.configMapsPerPod,
testArg.daemonsPerNode,
)
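// For the plain 30 pods/node ReplicationController case above this evaluates to:
// "[Feature:Performance] should allow starting 30 pods per node using
// ReplicationController with 0 secrets, 0 configmaps and 0 daemons".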
itArg := testArg
It(name, func() {
nodePrepPhase := testPhaseDurations.StartPhase(100, "node preparation")
defer nodePrepPhase.End()
nodePreparer := framework.NewE2ETestNodePreparer(
f.ClientSet,
[]testutils.CountToStrategy{{Count: nodeCount, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
)
framework.ExpectNoError(nodePreparer.PrepareNodes())
defer nodePreparer.CleanupNodes()
podsPerNode := itArg.podsPerNode
if podsPerNode == 30 {
f.AddonResourceConstraints = func() map[string]framework.ResourceConstraint { return density30AddonResourceVerifier(nodeCount) }()
}
totalPods = (podsPerNode - itArg.daemonsPerNode) * nodeCount
fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
framework.ExpectNoError(err)
defer fileHndl.Close()
nodePrepPhase.End()
// nodeCountPerNamespace and CreateNamespaces are defined in load.go
numberOfCollections := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace
namespaces, err := CreateNamespaces(f, numberOfCollections, fmt.Sprintf("density-%v", testArg.podsPerNode), testPhaseDurations.StartPhase(200, "namespace creation"))
framework.ExpectNoError(err)
configs := make([]testutils.RunObjectConfig, numberOfCollections)
secretConfigs := make([]*testutils.SecretConfig, 0, numberOfCollections*itArg.secretsPerPod)
configMapConfigs := make([]*testutils.ConfigMapConfig, 0, numberOfCollections*itArg.configMapsPerPod)
// Since all RCs are created at the same time, timeout for each config
// has to assume that it will be run at the very end.
podThroughput := 20
timeout := time.Duration(totalPods/podThroughput)*time.Second + 3*time.Minute
// createClients is defined in load.go
clients, internalClients, err := createClients(numberOfCollections)
framework.ExpectNoError(err)
for i := 0; i < numberOfCollections; i++ {
nsName := namespaces[i].Name
secretNames := []string{}
for j := 0; j < itArg.secretsPerPod; j++ {
secretName := fmt.Sprintf("density-secret-%v-%v", i, j)
secretConfigs = append(secretConfigs, &testutils.SecretConfig{
Content: map[string]string{"foo": "bar"},
Client: clients[i],
Name: secretName,
Namespace: nsName,
LogFunc: framework.Logf,
})
secretNames = append(secretNames, secretName)
}
configMapNames := []string{}
for j := 0; j < itArg.configMapsPerPod; j++ {
configMapName := fmt.Sprintf("density-configmap-%v-%v", i, j)
configMapConfigs = append(configMapConfigs, &testutils.ConfigMapConfig{
Content: map[string]string{"foo": "bar"},
Client: clients[i],
Name: configMapName,
Namespace: nsName,
LogFunc: framework.Logf,
})
configMapNames = append(configMapNames, configMapName)
}
name := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid)
baseConfig := &testutils.RCConfig{
Client: clients[i],
InternalClient: internalClients[i],
Image: framework.GetPauseImageName(f.ClientSet),
Name: name,
Namespace: nsName,
Labels: map[string]string{"type": "densityPod"},
PollInterval: DensityPollInterval,
Timeout: timeout,
PodStatusFile: fileHndl,
Replicas: (totalPods + numberOfCollections - 1) / numberOfCollections,
CpuRequest: nodeCpuCapacity / 100,
MemRequest: nodeMemCapacity / 100,
MaxContainerFailures: &MaxContainerFailures,
Silent: true,
LogFunc: framework.Logf,
SecretNames: secretNames,
ConfigMapNames: configMapNames,
}
switch itArg.kind {
case api.Kind("ReplicationController"):
configs[i] = baseConfig
case extensions.Kind("ReplicaSet"):
configs[i] = &testutils.ReplicaSetConfig{RCConfig: *baseConfig}
case extensions.Kind("Deployment"):
configs[i] = &testutils.DeploymentConfig{RCConfig: *baseConfig}
case batch.Kind("Job"):
configs[i] = &testutils.JobConfig{RCConfig: *baseConfig}
default:
framework.Failf("Unsupported kind: %v", itArg.kind)
}
}
// Single client is running out of http2 connections in delete phase, hence we need more.
clients, internalClients, err = createClients(2)
framework.ExpectNoError(err)
dConfig := DensityTestConfig{
ClientSets: clients,
InternalClientsets: internalClients,
Configs: configs,
PodCount: totalPods,
PollInterval: DensityPollInterval,
kind: itArg.kind,
SecretConfigs: secretConfigs,
ConfigMapConfigs: configMapConfigs,
}
for i := 0; i < itArg.daemonsPerNode; i++ {
dConfig.DaemonConfigs = append(dConfig.DaemonConfigs,
&testutils.DaemonConfig{
Client: f.ClientSet,
Name: fmt.Sprintf("density-daemon-%v", i),
Namespace: f.Namespace.Name,
LogFunc: framework.Logf,
})
}
e2eStartupTime = runDensityTest(dConfig, testPhaseDurations)
if itArg.runLatencyTest {
By("Scheduling additional Pods to measure startup latencies")
createTimes := make(map[string]metav1.Time, 0)
nodeNames := make(map[string]string, 0)
scheduleTimes := make(map[string]metav1.Time, 0)
runTimes := make(map[string]metav1.Time, 0)
watchTimes := make(map[string]metav1.Time, 0)
var mutex sync.Mutex
checkPod := func(p *v1.Pod) {
mutex.Lock()
defer mutex.Unlock()
defer GinkgoRecover()
if p.Status.Phase == v1.PodRunning {
if _, found := watchTimes[p.Name]; !found {
watchTimes[p.Name] = metav1.Now()
createTimes[p.Name] = p.CreationTimestamp
nodeNames[p.Name] = p.Spec.NodeName
var startTime metav1.Time
for _, cs := range p.Status.ContainerStatuses {
if cs.State.Running != nil {
if startTime.Before(&cs.State.Running.StartedAt) {
startTime = cs.State.Running.StartedAt
}
}
}
if startTime != metav1.NewTime(time.Time{}) {
runTimes[p.Name] = startTime
} else {
framework.Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
}
}
}
}
additionalPodsPrefix = "density-latency-pod"
stopCh := make(chan struct{})
latencyPodStores := make([]cache.Store, len(namespaces))
for i := 0; i < len(namespaces); i++ {
nsName := namespaces[i].Name
latencyPodsStore, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
obj, err := c.CoreV1().Pods(nsName).List(options)
return runtime.Object(obj), err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
return c.CoreV1().Pods(nsName).Watch(options)
},
},
&v1.Pod{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
p, ok := obj.(*v1.Pod)
if !ok {
framework.Logf("Failed to cast observed object to *v1.Pod.")
}
Expect(ok).To(Equal(true))
go checkPod(p)
},
UpdateFunc: func(oldObj, newObj interface{}) {
p, ok := newObj.(*v1.Pod)
if !ok {
framework.Logf("Failed to cast observed object to *v1.Pod.")
}
Expect(ok).To(Equal(true))
go checkPod(p)
},
},
)
latencyPodStores[i] = latencyPodsStore
go controller.Run(stopCh)
}
// Create some additional pods with throughput ~5 pods/sec.
latencyPodStartupPhase := testPhaseDurations.StartPhase(800, "latency pods creation")
defer latencyPodStartupPhase.End()
var wg sync.WaitGroup
wg.Add(nodeCount)
// Explicitly set requests here.
// Doing so makes each scheduled pod count against its node's requested
// resources in the scheduler's priority function, which in turn results in
// latency pods being spread more evenly between nodes.
cpuRequest := *resource.NewMilliQuantity(nodeCpuCapacity/5, resource.DecimalSI)
memRequest := *resource.NewQuantity(nodeMemCapacity/5, resource.DecimalSI)
if podsPerNode > 30 {
// This is to make them schedulable on high-density tests
// (e.g. 100 pods/node kubemark).
cpuRequest = *resource.NewMilliQuantity(0, resource.DecimalSI)
memRequest = *resource.NewQuantity(0, resource.DecimalSI)
}
rcNameToNsMap := map[string]string{}
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
nsName := namespaces[i%len(namespaces)].Name
rcNameToNsMap[name] = nsName
go createRunningPodFromRC(&wg, c, name, nsName, framework.GetPauseImageName(f.ClientSet), additionalPodsPrefix, cpuRequest, memRequest)
time.Sleep(200 * time.Millisecond)
}
wg.Wait()
latencyPodStartupPhase.End()
latencyMeasurementPhase := testPhaseDurations.StartPhase(810, "pod startup latencies measurement")
defer latencyMeasurementPhase.End()
By("Waiting for all Pods begin observed by the watch...")
waitTimeout := 10 * time.Minute
for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) {
if time.Since(start) > waitTimeout {
framework.Failf("Timeout reached while waiting for all Pods to be observed by the watch.")
}
}
close(stopCh)
nodeToLatencyPods := make(map[string]int)
for i := range latencyPodStores {
for _, item := range latencyPodStores[i].List() {
pod := item.(*v1.Pod)
nodeToLatencyPods[pod.Spec.NodeName]++
}
for node, count := range nodeToLatencyPods {
if count > 1 {
framework.Logf("%d latency pods scheduled on %s", count, node)
}
}
}
for i := 0; i < len(namespaces); i++ {
nsName := namespaces[i].Name
selector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.namespace": nsName,
"source": v1.DefaultSchedulerName,
}.AsSelector().String()
options := metav1.ListOptions{FieldSelector: selector}
schedEvents, err := c.CoreV1().Events(nsName).List(options)
framework.ExpectNoError(err)
for k := range createTimes {
for _, event := range schedEvents.Items {
if event.InvolvedObject.Name == k {
scheduleTimes[k] = event.FirstTimestamp
break
}
}
}
}
scheduleLag := make([]framework.PodLatencyData, 0)
startupLag := make([]framework.PodLatencyData, 0)
watchLag := make([]framework.PodLatencyData, 0)
schedToWatchLag := make([]framework.PodLatencyData, 0)
e2eLag := make([]framework.PodLatencyData, 0)
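// For each latency pod we derive: create->schedule (scheduleLag),
// schedule->run (startupLag), run->watch (watchLag), schedule->watch
// (schedToWatchLag) and create->watch, i.e. end-to-end startup (e2eLag).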
for name, create := range createTimes {
sched, ok := scheduleTimes[name]
if !ok {
framework.Logf("Failed to find schedule time for %v", name)
missingMeasurements++
}
run, ok := runTimes[name]
if !ok {
framework.Logf("Failed to find run time for %v", name)
missingMeasurements++
}
watch, ok := watchTimes[name]
if !ok {
framework.Logf("Failed to find watch time for %v", name)
missingMeasurements++
}
node, ok := nodeNames[name]
if !ok {
framework.Logf("Failed to find node for %v", name)
missingMeasurements++
}
scheduleLag = append(scheduleLag, framework.PodLatencyData{Name: name, Node: node, Latency: sched.Time.Sub(create.Time)})
startupLag = append(startupLag, framework.PodLatencyData{Name: name, Node: node, Latency: run.Time.Sub(sched.Time)})
watchLag = append(watchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(run.Time)})
schedToWatchLag = append(schedToWatchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(sched.Time)})
e2eLag = append(e2eLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(create.Time)})
}
sort.Sort(framework.LatencySlice(scheduleLag))
sort.Sort(framework.LatencySlice(startupLag))
sort.Sort(framework.LatencySlice(watchLag))
sort.Sort(framework.LatencySlice(schedToWatchLag))
sort.Sort(framework.LatencySlice(e2eLag))
framework.PrintLatencies(scheduleLag, "worst schedule latencies")
framework.PrintLatencies(startupLag, "worst run-after-schedule latencies")
framework.PrintLatencies(watchLag, "worst watch latencies")
framework.PrintLatencies(schedToWatchLag, "worst scheduled-to-end total latencies")
framework.PrintLatencies(e2eLag, "worst e2e total latencies")
// Test whether e2e pod startup time is acceptable.
podStartupLatency := &framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLag)}
f.TestSummaries = append(f.TestSummaries, podStartupLatency)
framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency))
framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
latencyMeasurementPhase.End()
By("Removing additional replication controllers")
podDeletionPhase := testPhaseDurations.StartPhase(820, "latency pods deletion")
defer podDeletionPhase.End()
deleteRC := func(i int) {
defer GinkgoRecover()
name := additionalPodsPrefix + "-" + strconv.Itoa(i+1)
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(c, rcNameToNsMap[name], name))
}
workqueue.Parallelize(25, nodeCount, deleteRC)
podDeletionPhase.End()
}
cleanupDensityTest(dConfig, testPhaseDurations)
})
}
})
func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) {
defer GinkgoRecover()
defer wg.Done()
labels := map[string]string{
"type": podType,
"name": name,
}
rc := &v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int) *int32 { x := int32(i); return &x }(1),
Selector: labels,
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: name,
Image: image,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpuRequest,
v1.ResourceMemory: memRequest,
},
},
},
},
DNSPolicy: v1.DNSDefault,
},
},
},
}
for attempt := 1; attempt <= MaxLatencyPodCreationTries; attempt++ {
_, err := c.CoreV1().ReplicationControllers(ns).Create(rc)
if err == nil || apierrs.IsAlreadyExists(err) {
break
}
Expect(attempt < MaxLatencyPodCreationTries && framework.IsRetryableAPIError(err)).To(Equal(true))
}
framework.ExpectNoError(framework.WaitForControlledPodsRunning(c, ns, name, api.Kind("ReplicationController")))
framework.Logf("Found pod '%s' running", name)
}
func kindSupportsGarbageCollector(kind schema.GroupKind) bool {
return kind != extensions.Kind("Deployment") && kind != batch.Kind("Job")
}

vendor/k8s.io/kubernetes/test/e2e/scalability/empty.go generated vendored Normal file

@@ -0,0 +1,53 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scalability
import (
"time"
"k8s.io/api/core/v1"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
var _ = SIGDescribe("Empty [Feature:Empty]", func() {
f := framework.NewDefaultFramework("empty")
BeforeEach(func() {
c := f.ClientSet
ns := f.Namespace.Name
// TODO: respect --allow-notready-nodes flag in those functions.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
framework.WaitForAllNodesHealthy(c, time.Minute)
err := framework.CheckTestingNSDeletedExcept(c, ns)
framework.ExpectNoError(err)
})
It("starts a pod", func() {
configs, _, _ := GenerateConfigsForGroup([]*v1.Namespace{f.Namespace}, "empty-pod", 1, 1, framework.GetPauseImageName(f.ClientSet), []string{}, api.Kind("ReplicationController"), 0, 0)
if len(configs) != 1 {
framework.Failf("generateConfigs should have generated single config")
}
config := configs[0]
config.SetClient(f.ClientSet)
framework.ExpectNoError(config.Run())
})
})

vendor/k8s.io/kubernetes/test/e2e/scalability/framework.go generated vendored Normal file

@@ -0,0 +1,23 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scalability
import "github.com/onsi/ginkgo"
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-scalability] "+text, body)
}

vendor/k8s.io/kubernetes/test/e2e/scalability/load.go generated vendored Normal file

@@ -0,0 +1,653 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scalability
import (
"fmt"
"math"
"math/rand"
"net"
"net/http"
"os"
"strconv"
"sync"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/transport"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/apis/batch"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/timer"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
smallGroupSize = 5
mediumGroupSize = 30
bigGroupSize = 250
smallGroupName = "load-small"
mediumGroupName = "load-medium"
bigGroupName = "load-big"
// We start RCs/Services/pods/... in different namespaces in this test.
// nodeCountPerNamespace determines how many namespaces we will be using
// depending on the number of nodes in the underlying cluster.
nodeCountPerNamespace = 100
// How many threads will be used to create/delete services during this test.
serviceOperationsParallelism = 1
svcLabelKey = "svc-label"
)
var randomKind = schema.GroupKind{Kind: "Random"}
var knownKinds = []schema.GroupKind{
api.Kind("ReplicationController"),
extensions.Kind("Deployment"),
// TODO: uncomment when Jobs are fixed: #38497
//batch.Kind("Job"),
extensions.Kind("ReplicaSet"),
}
// This test suite can take a long time to run, so by default it is added to
// the ginkgo.skip list (see driver.go).
// To run this suite you must explicitly ask for it by setting the
// -t/--test flag or ginkgo.focus flag.
var _ = SIGDescribe("Load capacity", func() {
var clientset clientset.Interface
var nodeCount int
var ns string
var configs []testutils.RunObjectConfig
var secretConfigs []*testutils.SecretConfig
var configMapConfigs []*testutils.ConfigMapConfig
testCaseBaseName := "load"
var testPhaseDurations *timer.TestPhaseTimer
// Gathers metrics before teardown
// TODO: add a flag that allows skipping cleanup on failure
AfterEach(func() {
// Verify latency metrics
highLatencyRequests, metrics, err := framework.HighLatencyRequests(clientset, nodeCount)
framework.ExpectNoError(err)
if err == nil {
summaries := make([]framework.TestDataSummary, 0, 2)
summaries = append(summaries, metrics)
summaries = append(summaries, testPhaseDurations)
framework.PrintSummaries(summaries, testCaseBaseName)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
}
})
// We assume a default throughput of 10 pods/second.
// We may want to revisit it in the future.
// However, this can be overridden by the LOAD_TEST_THROUGHPUT env var.
throughput := 10
if throughputEnv := os.Getenv("LOAD_TEST_THROUGHPUT"); throughputEnv != "" {
if newThroughput, err := strconv.Atoi(throughputEnv); err == nil {
throughput = newThroughput
}
}
// Explicitly put here, to delete namespace at the end of the test
// (after measuring latency metrics, etc.).
options := framework.FrameworkOptions{
ClientQPS: float32(math.Max(50.0, float64(2*throughput))),
ClientBurst: int(math.Max(100.0, float64(4*throughput))),
}
f := framework.NewFramework(testCaseBaseName, options, nil)
f.NamespaceDeletionTimeout = time.Hour
BeforeEach(func() {
testPhaseDurations = timer.NewTestPhaseTimer()
clientset = f.ClientSet
ns = f.Namespace.Name
nodes := framework.GetReadySchedulableNodesOrDie(clientset)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
// Terminating a namespace (deleting the remaining objects from it - which
// generally means events) can affect the current run. Thus we wait for all
// terminating namespaces to be fully deleted before starting this test.
err := framework.CheckTestingNSDeletedExcept(clientset, ns)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.ResetMetrics(clientset))
})
type Load struct {
podsPerNode int
image string
command []string
// What kind of resource we want to create
kind schema.GroupKind
services bool
secretsPerPod int
configMapsPerPod int
daemonsPerNode int
}
loadTests := []Load{
// The container will consume 1 cpu and 512mb of memory.
{podsPerNode: 3, image: "jess/stress", command: []string{"stress", "-c", "1", "-m", "2"}, kind: api.Kind("ReplicationController")},
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: api.Kind("ReplicationController")},
// Tests for other resource types
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: extensions.Kind("Deployment")},
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: batch.Kind("Job")},
// Test scheduling when daemons are present
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: api.Kind("ReplicationController"), daemonsPerNode: 2},
// Test with secrets
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: extensions.Kind("Deployment"), secretsPerPod: 2},
// Test with configmaps
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: extensions.Kind("Deployment"), configMapsPerPod: 2},
// Special test case which randomizes created resources
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: randomKind},
}
for _, testArg := range loadTests {
feature := "ManualPerformance"
if testArg.podsPerNode == 30 && testArg.kind == api.Kind("ReplicationController") && testArg.daemonsPerNode == 0 && testArg.secretsPerPod == 0 && testArg.configMapsPerPod == 0 {
feature = "Performance"
}
name := fmt.Sprintf("[Feature:%s] should be able to handle %v pods per node %v with %v secrets, %v configmaps and %v daemons",
feature,
testArg.podsPerNode,
testArg.kind,
testArg.secretsPerPod,
testArg.configMapsPerPod,
testArg.daemonsPerNode,
)
itArg := testArg
itArg.services = os.Getenv("CREATE_SERVICES") != "false"
It(name, func() {
// Create a number of namespaces.
namespaceCount := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace
namespaces, err := CreateNamespaces(f, namespaceCount, fmt.Sprintf("load-%v-nodepods", itArg.podsPerNode), testPhaseDurations.StartPhase(110, "namespace creation"))
framework.ExpectNoError(err)
totalPods := (itArg.podsPerNode - itArg.daemonsPerNode) * nodeCount
configs, secretConfigs, configMapConfigs = generateConfigs(totalPods, itArg.image, itArg.command, namespaces, itArg.kind, itArg.secretsPerPod, itArg.configMapsPerPod)
serviceCreationPhase := testPhaseDurations.StartPhase(120, "services creation")
defer serviceCreationPhase.End()
if itArg.services {
framework.Logf("Creating services")
services := generateServicesForConfigs(configs)
createService := func(i int) {
defer GinkgoRecover()
_, err := clientset.CoreV1().Services(services[i].Namespace).Create(services[i])
framework.ExpectNoError(err)
}
workqueue.Parallelize(serviceOperationsParallelism, len(services), createService)
framework.Logf("%v Services created.", len(services))
defer func(services []*v1.Service) {
serviceCleanupPhase := testPhaseDurations.StartPhase(800, "services deletion")
defer serviceCleanupPhase.End()
framework.Logf("Starting to delete services...")
deleteService := func(i int) {
defer GinkgoRecover()
err := clientset.CoreV1().Services(services[i].Namespace).Delete(services[i].Name, nil)
framework.ExpectNoError(err)
}
workqueue.Parallelize(serviceOperationsParallelism, len(services), deleteService)
framework.Logf("Services deleted")
}(services)
} else {
framework.Logf("Skipping service creation")
}
serviceCreationPhase.End()
// Create all secrets.
secretsCreationPhase := testPhaseDurations.StartPhase(130, "secrets creation")
defer secretsCreationPhase.End()
for i := range secretConfigs {
secretConfigs[i].Run()
defer secretConfigs[i].Stop()
}
secretsCreationPhase.End()
// Create all configmaps.
configMapsCreationPhase := testPhaseDurations.StartPhase(140, "configmaps creation")
defer configMapsCreationPhase.End()
for i := range configMapConfigs {
configMapConfigs[i].Run()
defer configMapConfigs[i].Stop()
}
configMapsCreationPhase.End()
// StartDaemon if needed
daemonSetCreationPhase := testPhaseDurations.StartPhase(150, "daemonsets creation")
defer daemonSetCreationPhase.End()
for i := 0; i < itArg.daemonsPerNode; i++ {
daemonName := fmt.Sprintf("load-daemon-%v", i)
daemonConfig := &testutils.DaemonConfig{
Client: f.ClientSet,
Name: daemonName,
Namespace: f.Namespace.Name,
LogFunc: framework.Logf,
}
daemonConfig.Run()
defer func(config *testutils.DaemonConfig) {
framework.ExpectNoError(framework.DeleteResourceAndPods(
f.ClientSet,
f.InternalClientset,
extensions.Kind("DaemonSet"),
config.Namespace,
config.Name,
))
}(daemonConfig)
}
daemonSetCreationPhase.End()
// Simulate lifetime of RC:
// * create with initial size
// * scale RC to a random size and list all pods
// * scale RC to a random size and list all pods
// * delete it
//
// This will generate ~5 creations/deletions per second assuming:
// - X small RCs each 5 pods [ 5 * X = totalPods / 2 ]
// - Y medium RCs each 30 pods [ 30 * Y = totalPods / 4 ]
// - Z big RCs each 250 pods [ 250 * Z = totalPods / 4]
// We would like to spread creating replication controllers over time
// to make it possible to create/schedule them in the meantime.
// Currently we assume <throughput> pods/second average throughput.
// We may want to revisit it in the future.
framework.Logf("Starting to create %v objects...", itArg.kind)
creatingTime := time.Duration(totalPods/throughput) * time.Second
createAllResources(configs, creatingTime, testPhaseDurations.StartPhase(200, "load pods creation"))
By("============================================================================")
// We would like to spread scaling replication controllers over time
// to make it possible to create/schedule & delete them in the meantime.
// Currently we assume <throughput> pods/second average throughput.
// The expected number of created/deleted pods is less than totalPods/3.
scalingTime := time.Duration(totalPods/(3*throughput)) * time.Second
framework.Logf("Starting to scale %v objects first time...", itArg.kind)
scaleAllResources(configs, scalingTime, testPhaseDurations.StartPhase(300, "scaling first time"))
By("============================================================================")
framework.Logf("Starting to scale %v objects second time...", itArg.kind)
scaleAllResources(configs, scalingTime, testPhaseDurations.StartPhase(400, "scaling second time"))
By("============================================================================")
// Cleanup all created replication controllers.
// Currently we assume <throughput> pods/second average deletion throughput.
// We may want to revisit it in the future.
deletingTime := time.Duration(totalPods/throughput) * time.Second
framework.Logf("Starting to delete %v objects...", itArg.kind)
deleteAllResources(configs, deletingTime, testPhaseDurations.StartPhase(500, "load pods deletion"))
})
}
})
func createClients(numberOfClients int) ([]clientset.Interface, []internalclientset.Interface, error) {
clients := make([]clientset.Interface, numberOfClients)
internalClients := make([]internalclientset.Interface, numberOfClients)
for i := 0; i < numberOfClients; i++ {
config, err := framework.LoadConfig()
Expect(err).NotTo(HaveOccurred())
config.QPS = 100
config.Burst = 200
if framework.TestContext.KubeAPIContentType != "" {
config.ContentType = framework.TestContext.KubeAPIContentType
}
// For the purpose of this test, we want to force that clients
// do not share underlying transport (which is a default behavior
// in Kubernetes). Thus, we are explicitly creating transport for
// each client here.
transportConfig, err := config.TransportConfig()
if err != nil {
return nil, nil, err
}
tlsConfig, err := transport.TLSConfigFor(transportConfig)
if err != nil {
return nil, nil, err
}
config.Transport = utilnet.SetTransportDefaults(&http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: tlsConfig,
MaxIdleConnsPerHost: 100,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
})
// Overwrite TLS-related fields from config to avoid collision with
// Transport field.
config.TLSClientConfig = restclient.TLSClientConfig{}
c, err := clientset.NewForConfig(config)
if err != nil {
return nil, nil, err
}
clients[i] = c
internalClient, err := internalclientset.NewForConfig(config)
if err != nil {
return nil, nil, err
}
internalClients[i] = internalClient
}
return clients, internalClients, nil
}
func computePodCounts(total int) (int, int, int) {
// Small RCs own ~0.5 of the total number of pods, medium and big RCs ~0.25 each.
// For example for 3000 pods (100 nodes, 30 pods per node) there are:
// - 300 small RCs each 5 pods
// - 25 medium RCs each 30 pods
// - 3 big RCs each 250 pods
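// Working through that example: bigGroupCount = 3000/4/250 = 3 (750 pods),
// mediumGroupCount = (3000-750)/3/30 = 25 (750 pods), and
// smallGroupCount = (3000-750-750)/5 = 300 (1500 pods).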
bigGroupCount := total / 4 / bigGroupSize
total -= bigGroupCount * bigGroupSize
mediumGroupCount := total / 3 / mediumGroupSize
total -= mediumGroupCount * mediumGroupSize
smallGroupCount := total / smallGroupSize
return smallGroupCount, mediumGroupCount, bigGroupCount
}
func generateConfigs(
totalPods int,
image string,
command []string,
nss []*v1.Namespace,
kind schema.GroupKind,
secretsPerPod int,
configMapsPerPod int,
) ([]testutils.RunObjectConfig, []*testutils.SecretConfig, []*testutils.ConfigMapConfig) {
configs := make([]testutils.RunObjectConfig, 0)
secretConfigs := make([]*testutils.SecretConfig, 0)
configMapConfigs := make([]*testutils.ConfigMapConfig, 0)
smallGroupCount, mediumGroupCount, bigGroupCount := computePodCounts(totalPods)
newConfigs, newSecretConfigs, newConfigMapConfigs := GenerateConfigsForGroup(nss, smallGroupName, smallGroupSize, smallGroupCount, image, command, kind, secretsPerPod, configMapsPerPod)
configs = append(configs, newConfigs...)
secretConfigs = append(secretConfigs, newSecretConfigs...)
configMapConfigs = append(configMapConfigs, newConfigMapConfigs...)
newConfigs, newSecretConfigs, newConfigMapConfigs = GenerateConfigsForGroup(nss, mediumGroupName, mediumGroupSize, mediumGroupCount, image, command, kind, secretsPerPod, configMapsPerPod)
configs = append(configs, newConfigs...)
secretConfigs = append(secretConfigs, newSecretConfigs...)
configMapConfigs = append(configMapConfigs, newConfigMapConfigs...)
newConfigs, newSecretConfigs, newConfigMapConfigs = GenerateConfigsForGroup(nss, bigGroupName, bigGroupSize, bigGroupCount, image, command, kind, secretsPerPod, configMapsPerPod)
configs = append(configs, newConfigs...)
secretConfigs = append(secretConfigs, newSecretConfigs...)
configMapConfigs = append(configMapConfigs, newConfigMapConfigs...)
// Create a number of clients to better simulate the real use case
// where not everyone is using exactly the same client.
rcsPerClient := 20
clients, internalClients, err := createClients((len(configs) + rcsPerClient - 1) / rcsPerClient)
framework.ExpectNoError(err)
for i := 0; i < len(configs); i++ {
configs[i].SetClient(clients[i%len(clients)])
configs[i].SetInternalClient(internalClients[i%len(internalClients)])
}
for i := 0; i < len(secretConfigs); i++ {
secretConfigs[i].Client = clients[i%len(clients)]
}
for i := 0; i < len(configMapConfigs); i++ {
configMapConfigs[i].Client = clients[i%len(clients)]
}
return configs, secretConfigs, configMapConfigs
}
func GenerateConfigsForGroup(
nss []*v1.Namespace,
groupName string,
size, count int,
image string,
command []string,
kind schema.GroupKind,
secretsPerPod int,
configMapsPerPod int,
) ([]testutils.RunObjectConfig, []*testutils.SecretConfig, []*testutils.ConfigMapConfig) {
configs := make([]testutils.RunObjectConfig, 0, count)
secretConfigs := make([]*testutils.SecretConfig, 0, count*secretsPerPod)
configMapConfigs := make([]*testutils.ConfigMapConfig, 0, count*configMapsPerPod)
savedKind := kind
for i := 1; i <= count; i++ {
kind = savedKind
namespace := nss[i%len(nss)].Name
secretNames := make([]string, 0, secretsPerPod)
configMapNames := make([]string, 0, configMapsPerPod)
for j := 0; j < secretsPerPod; j++ {
secretName := fmt.Sprintf("%v-%v-secret-%v", groupName, i, j)
secretConfigs = append(secretConfigs, &testutils.SecretConfig{
Content: map[string]string{"foo": "bar"},
Client: nil, // this will be overwritten later
Name: secretName,
Namespace: namespace,
LogFunc: framework.Logf,
})
secretNames = append(secretNames, secretName)
}
for j := 0; j < configMapsPerPod; j++ {
configMapName := fmt.Sprintf("%v-%v-configmap-%v", groupName, i, j)
configMapConfigs = append(configMapConfigs, &testutils.ConfigMapConfig{
Content: map[string]string{"foo": "bar"},
Client: nil, // this will be overwritten later
Name: configMapName,
Namespace: namespace,
LogFunc: framework.Logf,
})
configMapNames = append(configMapNames, configMapName)
}
baseConfig := &testutils.RCConfig{
Client: nil, // this will be overwritten later
InternalClient: nil, // this will be overwritten later
Name: groupName + "-" + strconv.Itoa(i),
Namespace: namespace,
Timeout: 10 * time.Minute,
Image: image,
Command: command,
Replicas: size,
CpuRequest: 10, // 0.01 core
MemRequest: 26214400, // 25MB
SecretNames: secretNames,
ConfigMapNames: configMapNames,
// Define a label to group every 2 RCs into one service.
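// With integer division, (i+1)/2 maps RCs 1-2 to group 1, RCs 3-4 to group 2, and so on.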
Labels: map[string]string{svcLabelKey: groupName + "-" + strconv.Itoa((i+1)/2)},
}
if kind == randomKind {
kind = knownKinds[rand.Int()%len(knownKinds)]
}
var config testutils.RunObjectConfig
switch kind {
case api.Kind("ReplicationController"):
config = baseConfig
case extensions.Kind("ReplicaSet"):
config = &testutils.ReplicaSetConfig{RCConfig: *baseConfig}
case extensions.Kind("Deployment"):
config = &testutils.DeploymentConfig{RCConfig: *baseConfig}
case batch.Kind("Job"):
config = &testutils.JobConfig{RCConfig: *baseConfig}
default:
framework.Failf("Unsupported kind for config creation: %v", kind)
}
configs = append(configs, config)
}
return configs, secretConfigs, configMapConfigs
}
func generateServicesForConfigs(configs []testutils.RunObjectConfig) []*v1.Service {
services := make([]*v1.Service, 0)
currentSvcLabel := ""
for _, config := range configs {
svcLabel, found := config.GetLabelValue(svcLabelKey)
if !found || svcLabel == currentSvcLabel {
continue
}
currentSvcLabel = svcLabel
serviceName := config.GetName() + "-svc"
labels := map[string]string{
"name": config.GetName(),
svcLabelKey: currentSvcLabel,
}
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Namespace: config.GetNamespace(),
},
Spec: v1.ServiceSpec{
Selector: labels,
Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}},
},
}
services = append(services, service)
}
return services
}
func sleepUpTo(d time.Duration) {
if d.Nanoseconds() > 0 {
time.Sleep(time.Duration(rand.Int63n(d.Nanoseconds())))
}
}
func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
backoff := wait.Backoff{
Duration: initialDuration,
Factor: 3,
Jitter: 0,
Steps: 6,
}
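// With the 100ms initialDuration used by callers in this file, fn is attempted
// up to 6 times, with sleeps between attempts growing roughly geometrically
// (~100ms, 300ms, 900ms, ...) since Factor is 3 and Jitter is 0.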
return wait.ExponentialBackoff(backoff, fn)
}
func createAllResources(configs []testutils.RunObjectConfig, creatingTime time.Duration, testPhase *timer.Phase) {
defer testPhase.End()
var wg sync.WaitGroup
wg.Add(len(configs))
for _, config := range configs {
go createResource(&wg, config, creatingTime)
}
wg.Wait()
}
func createResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, creatingTime time.Duration) {
defer GinkgoRecover()
defer wg.Done()
sleepUpTo(creatingTime)
framework.ExpectNoError(config.Run(), fmt.Sprintf("creating %v %s", config.GetKind(), config.GetName()))
}
func scaleAllResources(configs []testutils.RunObjectConfig, scalingTime time.Duration, testPhase *timer.Phase) {
defer testPhase.End()
var wg sync.WaitGroup
wg.Add(len(configs))
for _, config := range configs {
go scaleResource(&wg, config, scalingTime)
}
wg.Wait()
}
// Scales RC to a random size within [0.5*size, 1.5*size] and lists all the pods afterwards.
// Scaling is always based on the original size, not the current size.
func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scalingTime time.Duration) {
defer GinkgoRecover()
defer wg.Done()
sleepUpTo(scalingTime)
newSize := uint(rand.Intn(config.GetReplicas()) + config.GetReplicas()/2)
framework.ExpectNoError(framework.ScaleResource(
config.GetClient(), config.GetInternalClient(), config.GetNamespace(), config.GetName(), newSize, true, config.GetKind()),
fmt.Sprintf("scaling %v %v", config.GetKind(), config.GetName()))
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.GetName()}))
options := metav1.ListOptions{
LabelSelector: selector.String(),
ResourceVersion: "0",
}
listResourcePodsFunc := func() (bool, error) {
_, err := config.GetClient().CoreV1().Pods(config.GetNamespace()).List(options)
if err == nil {
return true, nil
}
framework.Logf("Failed to list pods from %v %v due to: %v", config.GetKind(), config.GetName(), err)
if framework.IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("Failed to list pods from %v %v with non-retriable error: %v", config.GetKind(), config.GetName(), err)
}
err := retryWithExponentialBackOff(100*time.Millisecond, listResourcePodsFunc)
framework.ExpectNoError(err)
}
func deleteAllResources(configs []testutils.RunObjectConfig, deletingTime time.Duration, testPhase *timer.Phase) {
defer testPhase.End()
var wg sync.WaitGroup
wg.Add(len(configs))
for _, config := range configs {
go deleteResource(&wg, config, deletingTime)
}
wg.Wait()
}
func deleteResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, deletingTime time.Duration) {
defer GinkgoRecover()
defer wg.Done()
sleepUpTo(deletingTime)
if framework.TestContext.GarbageCollectorEnabled && config.GetKind() != extensions.Kind("Deployment") {
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(
config.GetClient(), config.GetKind(), config.GetNamespace(), config.GetName()),
fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName()))
} else {
framework.ExpectNoError(framework.DeleteResourceAndPods(
config.GetClient(), config.GetInternalClient(), config.GetKind(), config.GetNamespace(), config.GetName()),
fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName()))
}
}
func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string, testPhase *timer.Phase) ([]*v1.Namespace, error) {
defer testPhase.End()
namespaces := []*v1.Namespace{}
for i := 1; i <= namespaceCount; i++ {
namespace, err := f.CreateNamespace(fmt.Sprintf("%v-%d", namePrefix, i), nil)
if err != nil {
return []*v1.Namespace{}, err
}
namespaces = append(namespaces, namespace)
}
return namespaces, nil
}