Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions

@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"density.go",
"empty.go",
"framework.go",
"load.go",
],
@@ -16,34 +15,34 @@ go_library(
"//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/discovery/cached:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/transport:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/timer:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/discovery/cached:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/restmapper:go_default_library",
"//vendor/k8s.io/client-go/scale:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/transport:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)

@@ -17,6 +17,7 @@ limitations under the License.
package scalability
import (
"context"
"fmt"
"math"
"os"
@@ -56,6 +57,7 @@ const (
MinSaturationThreshold = 2 * time.Minute
MinPodsPerSecondThroughput = 8
DensityPollInterval = 10 * time.Second
MinPodStartupMeasurements = 500
)
// Maximum container failures this test tolerates before failing.
@@ -64,6 +66,9 @@ var MaxContainerFailures = 0
// Maximum no. of missing measurements related to pod-startup that the test tolerates.
var MaxMissingPodStartupMeasurements = 0
// Number of nodes in the cluster (computed inside BeforeEach).
var nodeCount = 0
type DensityTestConfig struct {
Configs []testutils.RunObjectConfig
ClientSets []clientset.Interface
@@ -168,9 +173,9 @@ func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceC
}
} else {
if numNodes <= 100 {
apiserverCPU = 1.8
apiserverCPU = 2.2
apiserverMem = 1700 * (1024 * 1024)
controllerCPU = 0.6
controllerCPU = 0.8
controllerMem = 530 * (1024 * 1024)
schedulerCPU = 0.4
schedulerMem = 180 * (1024 * 1024)
@@ -219,6 +224,14 @@ func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceC
CPUConstraint: schedulerCPU,
MemoryConstraint: schedulerMem,
}
constraints["coredns"] = framework.ResourceConstraint{
CPUConstraint: framework.NoCPUConstraint,
MemoryConstraint: 170 * (1024 * 1024),
}
constraints["kubedns"] = framework.ResourceConstraint{
CPUConstraint: framework.NoCPUConstraint,
MemoryConstraint: 170 * (1024 * 1024),
}
return constraints
}
@@ -285,6 +298,11 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi
replicationCtrlStartupPhase := testPhaseDurations.StartPhase(300, "saturation pods creation")
defer replicationCtrlStartupPhase.End()
// Start scheduler CPU profile-gatherer before we begin cluster saturation.
profileGatheringDelay := time.Duration(1+nodeCount/100) * time.Minute
schedulerProfilingStopCh := framework.StartCPUProfileGatherer("kube-scheduler", "density", profileGatheringDelay)
// Start all replication controllers.
startTime := time.Now()
wg := sync.WaitGroup{}
@@ -304,10 +322,16 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi
wg.Wait()
startupTime := time.Since(startTime)
close(logStopCh)
close(schedulerProfilingStopCh)
framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
replicationCtrlStartupPhase.End()
// Grabbing scheduler memory profile after cluster saturation finished.
wg.Add(1)
framework.GatherMemoryProfile("kube-scheduler", "density", &wg)
wg.Wait()
printPodAllocationPhase := testPhaseDurations.StartPhase(400, "printing pod allocation")
defer printPodAllocationPhase.End()
// Print some data about Pod to Node allocation
@@ -366,7 +390,6 @@ func cleanupDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPha
// limits on Docker's concurrent container startup.
var _ = SIGDescribe("Density", func() {
var c clientset.Interface
var nodeCount int
var additionalPodsPrefix string
var ns string
var uuid string
@@ -381,6 +404,7 @@ var _ = SIGDescribe("Density", func() {
missingMeasurements := 0
var testPhaseDurations *timer.TestPhaseTimer
var profileGathererStopCh chan struct{}
var etcdMetricsCollector *framework.EtcdMetricsCollector
// Gathers data prior to framework namespace teardown
AfterEach(func() {
@@ -388,7 +412,7 @@ var _ = SIGDescribe("Density", func() {
close(profileGathererStopCh)
wg := sync.WaitGroup{}
wg.Add(1)
framework.GatherApiserverMemoryProfile(&wg, "density")
framework.GatherMemoryProfile("kube-apiserver", "density", &wg)
wg.Wait()
saturationThreshold := time.Duration((totalPods / MinPodsPerSecondThroughput)) * time.Second
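
Note on the hunk above: the apiserver-specific profiling helpers are generalized here, so GatherApiserverMemoryProfile(&wg, "density") becomes GatherMemoryProfile("kube-apiserver", "density", &wg), and the CPU gatherer started in BeforeEach is shut down by closing profileGathererStopCh. As a rough illustration of that stop-channel pattern (a minimal sketch, not the framework's actual implementation; the gather callback and period are assumptions), a periodic gatherer in Go can look like this:

package profilegatherersketch

import "time"

// startPeriodicGatherer runs gather once per period until the returned channel
// is closed, mirroring the close(profileGathererStopCh) call seen above.
// The gather callback is an assumed stand-in, not a framework API.
func startPeriodicGatherer(period time.Duration, gather func()) chan struct{} {
	stopCh := make(chan struct{})
	go func() {
		ticker := time.NewTicker(period)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				gather()
			case <-stopCh:
				return
			}
		}
	}()
	return stopCh
}
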
@@ -412,7 +436,7 @@ var _ = SIGDescribe("Density", func() {
summaries = append(summaries, metrics)
}
// Verify scheduler metrics.
// Summarize scheduler metrics.
latency, err := framework.VerifySchedulerLatency(c)
framework.ExpectNoError(err)
if err == nil {
@@ -428,6 +452,14 @@ var _ = SIGDescribe("Density", func() {
}
summaries = append(summaries, latency)
}
// Summarize etcd metrics.
err = etcdMetricsCollector.StopAndSummarize()
framework.ExpectNoError(err)
if err == nil {
summaries = append(summaries, etcdMetricsCollector.GetMetrics())
}
summaries = append(summaries, testPhaseDurations)
framework.PrintSummaries(summaries, testCaseBaseName)
@@ -452,6 +484,18 @@ var _ = SIGDescribe("Density", func() {
ns = f.Namespace.Name
testPhaseDurations = timer.NewTestPhaseTimer()
// This is used to mimic what new service account token volumes will
// eventually look like. We can remove this once the controller manager
// publishes the root CA certificate to each namespace.
c.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-root-ca-crt",
},
Data: map[string]string{
"ca.crt": "trust me, i'm a ca.crt",
},
})
_, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
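
The dummy "kube-root-ca-crt" ConfigMap created above exists so the test can exercise projected service-account-token volumes (the new svcacctTokenProjectionsPerPod test dimension) before the controller manager publishes the real root CA. A hedged sketch of what such a projected volume could look like with the k8s.io/api/core/v1 types; the volume name, paths, and expiration below are illustrative assumptions, not taken from this commit:

package projectedvolumesketch

import v1 "k8s.io/api/core/v1"

// projectedTokenVolume sketches the rough shape of a service-account-token
// projected volume backed by the "kube-root-ca-crt" ConfigMap created above.
// Volume name, paths, and expiration are illustrative only.
func projectedTokenVolume() v1.Volume {
	expiry := int64(3600)
	return v1.Volume{
		Name: "token-projection",
		VolumeSource: v1.VolumeSource{
			Projected: &v1.ProjectedVolumeSource{
				Sources: []v1.VolumeProjection{
					{ServiceAccountToken: &v1.ServiceAccountTokenProjection{
						Path:              "token",
						ExpirationSeconds: &expiry,
					}},
					{ConfigMap: &v1.ConfigMapProjection{
						LocalObjectReference: v1.LocalObjectReference{Name: "kube-root-ca-crt"},
						Items:                []v1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
					}},
				},
			},
		},
	}
}
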
@@ -487,7 +531,11 @@ var _ = SIGDescribe("Density", func() {
// Start apiserver CPU profile gatherer with frequency based on cluster size.
profileGatheringDelay := time.Duration(5+nodeCount/100) * time.Minute
profileGathererStopCh = framework.StartApiserverCPUProfileGatherer(profileGatheringDelay)
profileGathererStopCh = framework.StartCPUProfileGatherer("kube-apiserver", "density", profileGatheringDelay)
// Start etcd metrics collection.
etcdMetricsCollector = framework.NewEtcdMetricsCollector()
etcdMetricsCollector.StartCollecting(time.Minute)
})
type Density struct {
@@ -497,11 +545,12 @@ var _ = SIGDescribe("Density", func() {
// Controls how often the apiserver is polled for pods
interval time.Duration
// What kind of resource we should be creating. Default: ReplicationController
kind schema.GroupKind
secretsPerPod int
configMapsPerPod int
daemonsPerNode int
quotas bool
kind schema.GroupKind
secretsPerPod int
configMapsPerPod int
svcacctTokenProjectionsPerPod int
daemonsPerNode int
quotas bool
}
densityTests := []Density{
@@ -520,6 +569,8 @@ var _ = SIGDescribe("Density", func() {
{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment"), secretsPerPod: 2},
// Test with configmaps
{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment"), configMapsPerPod: 2},
// Test with service account projected volumes
{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment"), svcacctTokenProjectionsPerPod: 2},
// Test with quotas
{podsPerNode: 30, runLatencyTest: true, kind: api.Kind("ReplicationController"), quotas: true},
}
@@ -539,12 +590,13 @@ var _ = SIGDescribe("Density", func() {
feature = "HighDensityPerformance"
}
name := fmt.Sprintf("[Feature:%s] should allow starting %d pods per node using %v with %v secrets, %v configmaps and %v daemons",
name := fmt.Sprintf("[Feature:%s] should allow starting %d pods per node using %v with %v secrets, %v configmaps, %v token projections, and %v daemons",
feature,
testArg.podsPerNode,
testArg.kind,
testArg.secretsPerPod,
testArg.configMapsPerPod,
testArg.svcacctTokenProjectionsPerPod,
testArg.daemonsPerNode,
)
if testArg.quotas {
@@ -588,6 +640,7 @@ var _ = SIGDescribe("Density", func() {
timeout := time.Duration(totalPods/podThroughput)*time.Second + 3*time.Minute
// createClients is defined in load.go
clients, internalClients, scalesClients, err := createClients(numberOfCollections)
framework.ExpectNoError(err)
for i := 0; i < numberOfCollections; i++ {
nsName := namespaces[i].Name
secretNames := []string{}
@@ -616,24 +669,25 @@ var _ = SIGDescribe("Density", func() {
}
name := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid)
baseConfig := &testutils.RCConfig{
Client: clients[i],
InternalClient: internalClients[i],
ScalesGetter: scalesClients[i],
Image: imageutils.GetPauseImageName(),
Name: name,
Namespace: nsName,
Labels: map[string]string{"type": "densityPod"},
PollInterval: DensityPollInterval,
Timeout: timeout,
PodStatusFile: fileHndl,
Replicas: (totalPods + numberOfCollections - 1) / numberOfCollections,
CpuRequest: nodeCpuCapacity / 100,
MemRequest: nodeMemCapacity / 100,
MaxContainerFailures: &MaxContainerFailures,
Silent: true,
LogFunc: framework.Logf,
SecretNames: secretNames,
ConfigMapNames: configMapNames,
Client: clients[i],
InternalClient: internalClients[i],
ScalesGetter: scalesClients[i],
Image: imageutils.GetPauseImageName(),
Name: name,
Namespace: nsName,
Labels: map[string]string{"type": "densityPod"},
PollInterval: DensityPollInterval,
Timeout: timeout,
PodStatusFile: fileHndl,
Replicas: (totalPods + numberOfCollections - 1) / numberOfCollections,
CpuRequest: nodeCpuCapacity / 100,
MemRequest: nodeMemCapacity / 100,
MaxContainerFailures: &MaxContainerFailures,
Silent: true,
LogFunc: framework.Logf,
SecretNames: secretNames,
ConfigMapNames: configMapNames,
ServiceAccountTokenProjections: itArg.svcacctTokenProjectionsPerPod,
}
switch itArg.kind {
case api.Kind("ReplicationController"):
@@ -651,7 +705,7 @@ var _ = SIGDescribe("Density", func() {
// Single client is running out of http2 connections in delete phase, hence we need more.
clients, internalClients, scalesClients, err = createClients(2)
framework.ExpectNoError(err)
dConfig := DensityTestConfig{
ClientSets: clients,
InternalClientsets: internalClients,
@@ -674,8 +728,13 @@ var _ = SIGDescribe("Density", func() {
})
}
e2eStartupTime = runDensityTest(dConfig, testPhaseDurations, &scheduleThroughputs)
defer cleanupDensityTest(dConfig, testPhaseDurations)
if itArg.runLatencyTest {
By("Scheduling additional Pods to measure startup latencies")
// Pick latencyPodsIterations so that:
// latencyPodsIterations * nodeCount >= MinPodStartupMeasurements.
latencyPodsIterations := (MinPodStartupMeasurements + nodeCount - 1) / nodeCount
By(fmt.Sprintf("Scheduling additional %d Pods to measure startup latencies", latencyPodsIterations*nodeCount))
createTimes := make(map[string]metav1.Time, 0)
nodeNames := make(map[string]string, 0)
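
The latency phase is now sized from the cluster: latencyPodsIterations is the integer ceiling of MinPodStartupMeasurements / nodeCount, so iterations * nodeCount always reaches at least 500 startup measurements. A small worked example of that arithmetic (the node counts below are illustrative, not taken from the commit):

package main

import "fmt"

const minPodStartupMeasurements = 500 // mirrors the new constant above

// iterations is the same ceiling division used in the diff:
// (MinPodStartupMeasurements + nodeCount - 1) / nodeCount.
func iterations(nodeCount int) int {
	return (minPodStartupMeasurements + nodeCount - 1) / nodeCount
}

func main() {
	for _, nodeCount := range []int{50, 150, 1000} { // illustrative cluster sizes
		it := iterations(nodeCount)
		// Prints 10/500, 4/600 and 1/1000 respectively: always >= 500 measurements.
		fmt.Printf("nodeCount=%d -> iterations=%d, latency pods=%d\n", nodeCount, it, it*nodeCount)
	}
}
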
@@ -754,58 +813,76 @@ var _ = SIGDescribe("Density", func() {
go controller.Run(stopCh)
}
for latencyPodsIteration := 0; latencyPodsIteration < latencyPodsIterations; latencyPodsIteration++ {
podIndexOffset := latencyPodsIteration * nodeCount
framework.Logf("Creating %d latency pods in range [%d, %d]", nodeCount, podIndexOffset+1, podIndexOffset+nodeCount)
// Create some additional pods with throughput ~5 pods/sec.
latencyPodStartupPhase := testPhaseDurations.StartPhase(800, "latency pods creation")
defer latencyPodStartupPhase.End()
var wg sync.WaitGroup
wg.Add(nodeCount)
// Explicitly set requests here.
// Thanks to it we trigger increasing priority function by scheduling
// a pod to a node, which in turn will result in spreading latency pods
// more evenly between nodes.
cpuRequest := *resource.NewMilliQuantity(nodeCpuCapacity/5, resource.DecimalSI)
memRequest := *resource.NewQuantity(nodeMemCapacity/5, resource.DecimalSI)
if podsPerNode > 30 {
// This is to make them schedulable on high-density tests
// (e.g. 100 pods/node kubemark).
cpuRequest = *resource.NewMilliQuantity(0, resource.DecimalSI)
memRequest = *resource.NewQuantity(0, resource.DecimalSI)
}
rcNameToNsMap := map[string]string{}
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
nsName := namespaces[i%len(namespaces)].Name
rcNameToNsMap[name] = nsName
go createRunningPodFromRC(&wg, c, name, nsName, imageutils.GetPauseImageName(), additionalPodsPrefix, cpuRequest, memRequest)
time.Sleep(200 * time.Millisecond)
}
wg.Wait()
latencyPodStartupPhase.End()
watchTimesLen := len(watchTimes)
latencyMeasurementPhase := testPhaseDurations.StartPhase(810, "pod startup latencies measurement")
defer latencyMeasurementPhase.End()
By("Waiting for all Pods begin observed by the watch...")
waitTimeout := 10 * time.Minute
for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) {
if time.Since(start) < waitTimeout {
framework.Failf("Timeout reached waiting for all Pods being observed by the watch.")
// Create some additional pods with throughput ~5 pods/sec.
latencyPodStartupPhase := testPhaseDurations.StartPhase(800+latencyPodsIteration*10, "latency pods creation")
defer latencyPodStartupPhase.End()
var wg sync.WaitGroup
wg.Add(nodeCount)
// Explicitly set requests here.
// Thanks to it we trigger increasing priority function by scheduling
// a pod to a node, which in turn will result in spreading latency pods
// more evenly between nodes.
cpuRequest := *resource.NewMilliQuantity(nodeCpuCapacity/5, resource.DecimalSI)
memRequest := *resource.NewQuantity(nodeMemCapacity/5, resource.DecimalSI)
if podsPerNode > 30 {
// This is to make them schedulable on high-density tests
// (e.g. 100 pods/node kubemark).
cpuRequest = *resource.NewMilliQuantity(0, resource.DecimalSI)
memRequest = *resource.NewQuantity(0, resource.DecimalSI)
}
}
close(stopCh)
nodeToLatencyPods := make(map[string]int)
for i := range latencyPodStores {
for _, item := range latencyPodStores[i].List() {
pod := item.(*v1.Pod)
nodeToLatencyPods[pod.Spec.NodeName]++
rcNameToNsMap := map[string]string{}
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(podIndexOffset+i)
nsName := namespaces[i%len(namespaces)].Name
rcNameToNsMap[name] = nsName
go createRunningPodFromRC(&wg, c, name, nsName, imageutils.GetPauseImageName(), additionalPodsPrefix, cpuRequest, memRequest)
time.Sleep(200 * time.Millisecond)
}
for node, count := range nodeToLatencyPods {
if count > 1 {
framework.Logf("%d latency pods scheduled on %s", count, node)
wg.Wait()
latencyPodStartupPhase.End()
latencyMeasurementPhase := testPhaseDurations.StartPhase(801+latencyPodsIteration*10, "pod startup latencies measurement")
defer latencyMeasurementPhase.End()
By("Waiting for all Pods begin observed by the watch...")
waitTimeout := 10 * time.Minute
for start := time.Now(); len(watchTimes) < watchTimesLen+nodeCount; time.Sleep(10 * time.Second) {
if time.Since(start) < waitTimeout {
framework.Failf("Timeout reached waiting for all Pods being observed by the watch.")
}
}
nodeToLatencyPods := make(map[string]int)
for i := range latencyPodStores {
for _, item := range latencyPodStores[i].List() {
pod := item.(*v1.Pod)
nodeToLatencyPods[pod.Spec.NodeName]++
}
for node, count := range nodeToLatencyPods {
if count > 1 {
framework.Logf("%d latency pods scheduled on %s", count, node)
}
}
}
latencyMeasurementPhase.End()
By("Removing additional replication controllers")
podDeletionPhase := testPhaseDurations.StartPhase(802+latencyPodsIteration*10, "latency pods deletion")
defer podDeletionPhase.End()
deleteRC := func(i int) {
defer GinkgoRecover()
name := additionalPodsPrefix + "-" + strconv.Itoa(podIndexOffset+i+1)
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(c, rcNameToNsMap[name], name))
}
workqueue.ParallelizeUntil(context.TODO(), 25, nodeCount, deleteRC)
podDeletionPhase.End()
}
close(stopCh)
for i := 0; i < len(namespaces); i++ {
nsName := namespaces[i].Name
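
The per-iteration cleanup above (and the service create/delete calls in the Load capacity test below) moves from workqueue.Parallelize to workqueue.ParallelizeUntil, which threads a context through the worker pool so remaining work pieces can be abandoned on cancellation. A minimal usage sketch; the worker and piece counts are illustrative, not the test's values:

package main

import (
	"context"
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Process 10 pieces with up to 3 concurrent workers. context.TODO()
	// mirrors the call sites in this commit; a cancellable context would
	// let a caller stop scheduling new pieces early.
	doPiece := func(i int) {
		fmt.Printf("processing piece %d\n", i)
	}
	workqueue.ParallelizeUntil(context.TODO(), 3, 10, doPiece)
}
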
@@ -893,20 +970,7 @@ var _ = SIGDescribe("Density", func() {
framework.ExpectNoError(framework.VerifyLatencyWithinThreshold(podStartupLatencyThreshold, podStartupLatency.E2ELatency, "pod startup"))
framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
latencyMeasurementPhase.End()
By("Removing additional replication controllers")
podDeletionPhase := testPhaseDurations.StartPhase(820, "latency pods deletion")
defer podDeletionPhase.End()
deleteRC := func(i int) {
defer GinkgoRecover()
name := additionalPodsPrefix + "-" + strconv.Itoa(i+1)
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(c, rcNameToNsMap[name], name))
}
workqueue.Parallelize(25, nodeCount, deleteRC)
podDeletionPhase.End()
}
cleanupDensityTest(dConfig, testPhaseDurations)
})
}
})

@@ -1,54 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scalability
import (
"time"
"k8s.io/api/core/v1"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
)
var _ = SIGDescribe("Empty [Feature:Empty]", func() {
f := framework.NewDefaultFramework("empty")
BeforeEach(func() {
c := f.ClientSet
ns := f.Namespace.Name
// TODO: respect --allow-notready-nodes flag in those functions.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
framework.WaitForAllNodesHealthy(c, time.Minute)
err := framework.CheckTestingNSDeletedExcept(c, ns)
framework.ExpectNoError(err)
})
It("starts a pod", func() {
configs, _, _ := GenerateConfigsForGroup([]*v1.Namespace{f.Namespace}, "empty-pod", 1, 1, imageutils.GetPauseImageName(), []string{}, api.Kind("ReplicationController"), 0, 0)
if len(configs) != 1 {
framework.Failf("generateConfigs should have generated single config")
}
config := configs[0]
config.SetClient(f.ClientSet)
framework.ExpectNoError(config.Run())
})
})

@@ -17,6 +17,7 @@ limitations under the License.
package scalability
import (
"context"
"fmt"
"math"
"math/rand"
@@ -37,11 +38,14 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
cacheddiscovery "k8s.io/client-go/discovery/cached"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
scaleclient "k8s.io/client-go/scale"
"k8s.io/client-go/transport"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/batch"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/extensions"
@@ -52,9 +56,6 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/restmapper"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)
const (
@@ -106,7 +107,7 @@ var _ = SIGDescribe("Load capacity", func() {
close(profileGathererStopCh)
wg := sync.WaitGroup{}
wg.Add(1)
framework.GatherApiserverMemoryProfile(&wg, "load")
framework.GatherMemoryProfile("kube-apiserver", "load", &wg)
wg.Wait()
// Verify latency metrics
@@ -159,7 +160,7 @@ var _ = SIGDescribe("Load capacity", func() {
// Start apiserver CPU profile gatherer with frequency based on cluster size.
profileGatheringDelay := time.Duration(5+nodeCount/100) * time.Minute
profileGathererStopCh = framework.StartApiserverCPUProfileGatherer(profileGatheringDelay)
profileGathererStopCh = framework.StartCPUProfileGatherer("kube-apiserver", "load", profileGatheringDelay)
})
type Load struct {
@@ -231,6 +232,8 @@ var _ = SIGDescribe("Load capacity", func() {
framework.ExpectNoError(CreateQuotas(f, namespaces, 2*totalPods, testPhaseDurations.StartPhase(115, "quota creation")))
}
f.AddonResourceConstraints = loadResourceConstraints()
serviceCreationPhase := testPhaseDurations.StartPhase(120, "services creation")
defer serviceCreationPhase.End()
if itArg.services {
@@ -240,7 +243,7 @@ var _ = SIGDescribe("Load capacity", func() {
defer GinkgoRecover()
framework.ExpectNoError(testutils.CreateServiceWithRetries(clientset, services[i].Namespace, services[i]))
}
workqueue.Parallelize(serviceOperationsParallelism, len(services), createService)
workqueue.ParallelizeUntil(context.TODO(), serviceOperationsParallelism, len(services), createService)
framework.Logf("%v Services created.", len(services))
defer func(services []*v1.Service) {
serviceCleanupPhase := testPhaseDurations.StartPhase(800, "services deletion")
@@ -250,7 +253,7 @@ var _ = SIGDescribe("Load capacity", func() {
defer GinkgoRecover()
framework.ExpectNoError(testutils.DeleteResourceWithRetries(clientset, api.Kind("Service"), services[i].Namespace, services[i].Name, nil))
}
workqueue.Parallelize(serviceOperationsParallelism, len(services), deleteService)
workqueue.ParallelizeUntil(context.TODO(), serviceOperationsParallelism, len(services), deleteService)
framework.Logf("Services deleted")
}(services)
} else {
@@ -320,16 +323,14 @@ var _ = SIGDescribe("Load capacity", func() {
// We would like to spread scaling replication controllers over time
// to make it possible to create/schedule & delete them in the meantime.
// Currently we assume that <throughput> pods/second average throughput.
// The expected number of created/deleted pods is less than totalPods/3.
scalingTime := time.Duration(totalPods/(3*throughput)) * time.Second
// The expected number of created/deleted pods is totalPods/4 when scaling,
// as each RC changes its size from X to a uniform random value in [X/2, 3X/2].
scalingTime := time.Duration(totalPods/(4*throughput)) * time.Second
framework.Logf("Starting to scale %v objects first time...", itArg.kind)
scaleAllResources(configs, scalingTime, testPhaseDurations.StartPhase(300, "scaling first time"))
By("============================================================================")
framework.Logf("Starting to scale %v objects second time...", itArg.kind)
scaleAllResources(configs, scalingTime, testPhaseDurations.StartPhase(400, "scaling second time"))
By("============================================================================")
// Cleanup all created replication controllers.
// Currently we assume <throughput> pods/second average deletion throughput.
// We may want to revisit it in the future.
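
The rewritten comment above changes the scaling-time estimate from totalPods/3 to totalPods/4, and the arithmetic in the new comment checks out: if a controller of size X is scaled to X' drawn uniformly from [X/2, 3X/2], the expected number of pods created or deleted is

E|X' - X| = \frac{1}{X}\int_{-X/2}^{X/2} |t|\,dt = \frac{X}{4},

so summed over all controllers roughly totalPods/4 pods churn per scaling pass, which is why scalingTime now divides by 4*throughput instead of 3*throughput.
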
@@ -430,6 +431,19 @@ func computePodCounts(total int) (int, int, int) {
return smallGroupCount, mediumGroupCount, bigGroupCount
}
func loadResourceConstraints() map[string]framework.ResourceConstraint {
constraints := make(map[string]framework.ResourceConstraint)
constraints["coredns"] = framework.ResourceConstraint{
CPUConstraint: framework.NoCPUConstraint,
MemoryConstraint: 170 * (1024 * 1024),
}
constraints["kubedns"] = framework.ResourceConstraint{
CPUConstraint: framework.NoCPUConstraint,
MemoryConstraint: 170 * (1024 * 1024),
}
return constraints
}
func generateConfigs(
totalPods int,
image string,