mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 18:53:35 +00:00

vendor update for CSI 0.3.0

vendor/k8s.io/kubernetes/test/e2e/scalability/density.go (generated, vendored): 139 changed lines
@@ -35,6 +35,7 @@ import (
	utiluuid "k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/watch"
	clientset "k8s.io/client-go/kubernetes"
	scaleclient "k8s.io/client-go/scale"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/kubernetes/pkg/apis/batch"
@@ -44,12 +45,14 @@ import (
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/framework/timer"
	testutils "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

const (
	PodStartupLatencyThreshold = 5 * time.Second
	MinSaturationThreshold     = 2 * time.Minute
	MinPodsPerSecondThroughput = 8
	DensityPollInterval        = 10 * time.Second
@@ -65,6 +68,7 @@ type DensityTestConfig struct {
	Configs            []testutils.RunObjectConfig
	ClientSets         []clientset.Interface
	InternalClientsets []internalclientset.Interface
	ScaleClients       []scaleclient.ScalesGetter
	PollInterval       time.Duration
	PodCount           int
	// What kind of resource we want to create
@@ -112,9 +116,8 @@ func (dtc *DensityTestConfig) deleteConfigMaps(testPhase *timer.Phase) {
func (dtc *DensityTestConfig) deleteDaemonSets(numberOfClients int, testPhase *timer.Phase) {
	defer testPhase.End()
	for i := range dtc.DaemonConfigs {
		framework.ExpectNoError(framework.DeleteResourceAndPods(
		framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(
			dtc.ClientSets[i%numberOfClients],
			dtc.InternalClientsets[i%numberOfClients],
			extensions.Kind("DaemonSet"),
			dtc.DaemonConfigs[i].Namespace,
			dtc.DaemonConfigs[i].Name,
@@ -166,9 +169,9 @@ func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
	} else {
		if numNodes <= 100 {
			apiserverCPU = 1.8
			apiserverMem = 1500 * (1024 * 1024)
			controllerCPU = 0.5
			controllerMem = 500 * (1024 * 1024)
			apiserverMem = 1700 * (1024 * 1024)
			controllerCPU = 0.6
			controllerMem = 530 * (1024 * 1024)
			schedulerCPU = 0.4
			schedulerMem = 180 * (1024 * 1024)
		}
@@ -219,30 +222,60 @@ func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
	return constraints
}

func logPodStartupStatus(c clientset.Interface, expectedPods int, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) {
func computeAverage(sample []float64) float64 {
	sum := 0.0
	for _, value := range sample {
		sum += value
	}
	return sum / float64(len(sample))
}

func computeQuantile(sample []float64, quantile float64) float64 {
	Expect(sort.Float64sAreSorted(sample)).To(Equal(true))
	Expect(quantile >= 0.0 && quantile <= 1.0).To(Equal(true))
	index := int(quantile*float64(len(sample))) - 1
	if index < 0 {
		return math.NaN()
	}
	return sample[index]
}
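// Illustration (not part of the vendored file): for a sorted sample of length
// 10, computeQuantile(sample, 0.5) returns sample[4] and
// computeQuantile(sample, 0.99) returns sample[8], since the index is
// int(quantile*len(sample)) - 1; an empty sample, or quantile 0, makes the
// index negative and yields math.NaN().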

func logPodStartupStatus(
	c clientset.Interface,
	expectedPods int,
	observedLabels map[string]string,
	period time.Duration,
	scheduleThroughputs *[]float64,
	stopCh chan struct{}) {

	label := labels.SelectorFromSet(labels.Set(observedLabels))
	podStore := testutils.NewPodStore(c, metav1.NamespaceAll, label, fields.Everything())
	podStore, err := testutils.NewPodStore(c, metav1.NamespaceAll, label, fields.Everything())
	framework.ExpectNoError(err)
	defer podStore.Stop()

	ticker := time.NewTicker(period)
	startupStatus := testutils.ComputeRCStartupStatus(podStore.List(), expectedPods)
	lastScheduledCount := startupStatus.Scheduled
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			pods := podStore.List()
			startupStatus := testutils.ComputeRCStartupStatus(pods, expectedPods)
			framework.Logf(startupStatus.String("Density"))
		case <-stopCh:
			pods := podStore.List()
			startupStatus := testutils.ComputeRCStartupStatus(pods, expectedPods)
			framework.Logf(startupStatus.String("Density"))
			return
		}
		// Log status of the pods.
		startupStatus := testutils.ComputeRCStartupStatus(podStore.List(), expectedPods)
		framework.Logf(startupStatus.String("Density"))
		// Compute scheduling throughput for the latest time period.
		throughput := float64(startupStatus.Scheduled-lastScheduledCount) / float64(period/time.Second)
		*scheduleThroughputs = append(*scheduleThroughputs, throughput)
		lastScheduledCount = startupStatus.Scheduled
	}
}
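// Illustration (not part of the vendored file): with period set to
// DensityPollInterval (10s), if 120 pods were scheduled at the previous tick
// and 170 are scheduled now, the loop above appends (170-120)/10 = 5 pods/s
// to *scheduleThroughputs.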

// runDensityTest will perform a density test and return the time it took for
// all pods to start
func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer) time.Duration {
func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer, scheduleThroughputs *[]float64) time.Duration {
	defer GinkgoRecover()

	// Create all secrets, configmaps and daemons.
@@ -267,9 +300,9 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer) time.Duration {
		}()
	}
	logStopCh := make(chan struct{})
	go logPodStartupStatus(dtc.ClientSets[0], dtc.PodCount, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
	go logPodStartupStatus(dtc.ClientSets[0], dtc.PodCount, map[string]string{"type": "densityPod"}, dtc.PollInterval, scheduleThroughputs, logStopCh)
	wg.Wait()
	startupTime := time.Now().Sub(startTime)
	startupTime := time.Since(startTime)
	close(logStopCh)
	framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
	framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
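	// Illustration (not part of the vendored file): for PodCount = 3000 pods that
	// saturate in 10 minutes, the line above logs a throughput of 3000/600 = 5 pods/s.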
@@ -313,15 +346,9 @@ func cleanupDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer) {
		name := dtc.Configs[i].GetName()
		namespace := dtc.Configs[i].GetNamespace()
		kind := dtc.Configs[i].GetKind()
		if framework.TestContext.GarbageCollectorEnabled && kindSupportsGarbageCollector(kind) {
			By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind))
			err := framework.DeleteResourceAndWaitForGC(dtc.ClientSets[i%numberOfClients], kind, namespace, name)
			framework.ExpectNoError(err)
		} else {
			By(fmt.Sprintf("Cleaning up the %v and pods", kind))
			err := framework.DeleteResourceAndPods(dtc.ClientSets[i%numberOfClients], dtc.InternalClientsets[i%numberOfClients], kind, namespace, name)
			framework.ExpectNoError(err)
		}
		By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind))
		err := framework.DeleteResourceAndWaitForGC(dtc.ClientSets[i%numberOfClients], kind, namespace, name)
		framework.ExpectNoError(err)
	}
	podCleanupPhase.End()

@@ -348,6 +375,7 @@ var _ = SIGDescribe("Density", func() {
	var nodeCpuCapacity int64
	var nodeMemCapacity int64
	var nodes *v1.NodeList
	var scheduleThroughputs []float64

	testCaseBaseName := "density"
	missingMeasurements := 0
@@ -382,21 +410,30 @@ var _ = SIGDescribe("Density", func() {
		framework.ExpectNoError(err)
		if err == nil {
			summaries = append(summaries, metrics)
			Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
		}

		// Verify scheduler metrics.
		// TODO: Reset metrics at the beginning of the test.
		// We should do something similar to how we do it for APIserver.
		latency, err := framework.VerifySchedulerLatency(c)
		framework.ExpectNoError(err)
		if err == nil {
			// Compute avg and quantiles of throughput (excluding last element, that's usually an outlier).
			sampleSize := len(scheduleThroughputs)
			if sampleSize > 1 {
				scheduleThroughputs = scheduleThroughputs[:sampleSize-1]
				sort.Float64s(scheduleThroughputs)
				latency.ThroughputAverage = computeAverage(scheduleThroughputs)
				latency.ThroughputPerc50 = computeQuantile(scheduleThroughputs, 0.5)
				latency.ThroughputPerc90 = computeQuantile(scheduleThroughputs, 0.9)
				latency.ThroughputPerc99 = computeQuantile(scheduleThroughputs, 0.99)
			}
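			// Illustration (not part of the vendored file): if scheduleThroughputs is
			// [4.8, 5.2, 5.0, 0.7], where the trailing 0.7 covers the final, partially
			// filled interval, the block above drops that last sample, sorts the rest
			// to [4.8, 5.0, 5.2] and reports ThroughputAverage = 5.0,
			// ThroughputPerc50 = 4.8 and ThroughputPerc90 = ThroughputPerc99 = 5.0.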
			summaries = append(summaries, latency)
		}
		summaries = append(summaries, testPhaseDurations)

		framework.PrintSummaries(summaries, testCaseBaseName)

		// Fail if there were some high-latency requests.
		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
		// Fail if more than the allowed threshold of measurements were missing in the latencyTest.
		Expect(missingMeasurements <= MaxMissingPodStartupMeasurements).To(Equal(true))
	})
@@ -430,6 +467,7 @@ var _ = SIGDescribe("Density", func() {

		uuid = string(utiluuid.NewUUID())

		framework.ExpectNoError(framework.ResetSchedulerMetrics(c))
		framework.ExpectNoError(framework.ResetMetrics(c))
		framework.ExpectNoError(os.Mkdir(fmt.Sprintf(framework.TestContext.OutputDir+"/%s", uuid), 0777))

@@ -538,8 +576,7 @@ var _ = SIGDescribe("Density", func() {
		namespaces, err := CreateNamespaces(f, numberOfCollections, fmt.Sprintf("density-%v", testArg.podsPerNode), testPhaseDurations.StartPhase(200, "namespace creation"))
		framework.ExpectNoError(err)
		if itArg.quotas {
			err := CreateQuotas(f, namespaces, totalPods+nodeCount, testPhaseDurations.StartPhase(210, "quota creation"))
			framework.ExpectNoError(err)
			framework.ExpectNoError(CreateQuotas(f, namespaces, totalPods+nodeCount, testPhaseDurations.StartPhase(210, "quota creation")))
		}

		configs := make([]testutils.RunObjectConfig, numberOfCollections)
@@ -582,7 +619,7 @@ var _ = SIGDescribe("Density", func() {
			Client:         clients[i],
			InternalClient: internalClients[i],
			ScalesGetter:   scalesClients[i],
			Image:          framework.GetPauseImageName(f.ClientSet),
			Image:          imageutils.GetPauseImageName(),
			Name:           name,
			Namespace:      nsName,
			Labels:         map[string]string{"type": "densityPod"},
@@ -613,11 +650,12 @@ var _ = SIGDescribe("Density", func() {
		}

		// Single client is running out of http2 connections in delete phase, hence we need more.
		clients, internalClients, _, err = createClients(2)
		clients, internalClients, scalesClients, err = createClients(2)

		dConfig := DensityTestConfig{
			ClientSets:         clients,
			InternalClientsets: internalClients,
			ScaleClients:       scalesClients,
			Configs:            configs,
			PodCount:           totalPods,
			PollInterval:       DensityPollInterval,
@@ -635,7 +673,7 @@ var _ = SIGDescribe("Density", func() {
				LogFunc: framework.Logf,
			})
		}
		e2eStartupTime = runDensityTest(dConfig, testPhaseDurations)
		e2eStartupTime = runDensityTest(dConfig, testPhaseDurations, &scheduleThroughputs)
		if itArg.runLatencyTest {
			By("Scheduling additional Pods to measure startup latencies")

@@ -739,7 +777,7 @@ var _ = SIGDescribe("Density", func() {
				name := additionalPodsPrefix + "-" + strconv.Itoa(i)
				nsName := namespaces[i%len(namespaces)].Name
				rcNameToNsMap[name] = nsName
				go createRunningPodFromRC(&wg, c, name, nsName, framework.GetPauseImageName(f.ClientSet), additionalPodsPrefix, cpuRequest, memRequest)
				go createRunningPodFromRC(&wg, c, name, nsName, imageutils.GetPauseImageName(), additionalPodsPrefix, cpuRequest, memRequest)
				time.Sleep(200 * time.Millisecond)
			}
			wg.Wait()
@@ -830,16 +868,29 @@ var _ = SIGDescribe("Density", func() {
			sort.Sort(framework.LatencySlice(schedToWatchLag))
			sort.Sort(framework.LatencySlice(e2eLag))

			framework.PrintLatencies(scheduleLag, "worst schedule latencies")
			framework.PrintLatencies(startupLag, "worst run-after-schedule latencies")
			framework.PrintLatencies(watchLag, "worst watch latencies")
			framework.PrintLatencies(schedToWatchLag, "worst scheduled-to-end total latencies")
			framework.PrintLatencies(e2eLag, "worst e2e total latencies")
			framework.PrintLatencies(scheduleLag, "worst create-to-schedule latencies")
			framework.PrintLatencies(startupLag, "worst schedule-to-run latencies")
			framework.PrintLatencies(watchLag, "worst run-to-watch latencies")
			framework.PrintLatencies(schedToWatchLag, "worst schedule-to-watch latencies")
			framework.PrintLatencies(e2eLag, "worst e2e latencies")

			// Capture latency metrics related to pod-startup.
			podStartupLatency := &framework.PodStartupLatency{
				CreateToScheduleLatency: framework.ExtractLatencyMetrics(scheduleLag),
				ScheduleToRunLatency:    framework.ExtractLatencyMetrics(startupLag),
				RunToWatchLatency:       framework.ExtractLatencyMetrics(watchLag),
				ScheduleToWatchLatency:  framework.ExtractLatencyMetrics(schedToWatchLag),
				E2ELatency:              framework.ExtractLatencyMetrics(e2eLag),
			}
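			// Illustration (not part of the vendored file): scheduleLag, startupLag,
			// watchLag, schedToWatchLag and e2eLag are the sorted per-pod latency
			// slices printed above; ExtractLatencyMetrics presumably reduces each
			// slice to its Perc50/Perc90/Perc99 summary before the result is attached
			// to the test summaries below.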
			f.TestSummaries = append(f.TestSummaries, podStartupLatency)

			// Test whether e2e pod startup time is acceptable.
			podStartupLatency := &framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLag)}
			f.TestSummaries = append(f.TestSummaries, podStartupLatency)
			framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency))
			podStartupLatencyThreshold := framework.LatencyMetric{
				Perc50: PodStartupLatencyThreshold,
				Perc90: PodStartupLatencyThreshold,
				Perc99: PodStartupLatencyThreshold,
			}
			framework.ExpectNoError(framework.VerifyLatencyWithinThreshold(podStartupLatencyThreshold, podStartupLatency.E2ELatency, "pod startup"))
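			// Illustration (not part of the vendored file): with PodStartupLatencyThreshold
			// set to 5s in the const block above, the intent of this check is that the 50th,
			// 90th and 99th percentile of end-to-end pod startup latency each stay within
			// 5 seconds.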

			framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
			latencyMeasurementPhase.End()
@@ -901,7 +952,3 @@ func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns,
	framework.ExpectNoError(framework.WaitForControlledPodsRunning(c, ns, name, api.Kind("ReplicationController")))
	framework.Logf("Found pod '%s' running", name)
}

func kindSupportsGarbageCollector(kind schema.GroupKind) bool {
	return kind != extensions.Kind("Deployment") && kind != batch.Kind("Job")
}