vendor update for CSI 0.3.0

gman
2018-07-18 16:47:22 +02:00
parent 6f484f92fc
commit 8ea659f0d5
6810 changed files with 438061 additions and 193861 deletions

View File

@@ -19,10 +19,10 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/timer:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
@@ -39,6 +39,7 @@ go_library(
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/restmapper:go_default_library",
"//vendor/k8s.io/client-go/scale:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/transport:go_default_library",

View File

@@ -35,6 +35,7 @@ import (
utiluuid "k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/apis/batch"
@@ -44,12 +45,14 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/timer"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
PodStartupLatencyThreshold = 5 * time.Second
MinSaturationThreshold = 2 * time.Minute
MinPodsPerSecondThroughput = 8
DensityPollInterval = 10 * time.Second
@@ -65,6 +68,7 @@ type DensityTestConfig struct {
Configs []testutils.RunObjectConfig
ClientSets []clientset.Interface
InternalClientsets []internalclientset.Interface
ScaleClients []scaleclient.ScalesGetter
PollInterval time.Duration
PodCount int
// What kind of resource we want to create
@@ -112,9 +116,8 @@ func (dtc *DensityTestConfig) deleteConfigMaps(testPhase *timer.Phase) {
func (dtc *DensityTestConfig) deleteDaemonSets(numberOfClients int, testPhase *timer.Phase) {
defer testPhase.End()
for i := range dtc.DaemonConfigs {
framework.ExpectNoError(framework.DeleteResourceAndPods(
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(
dtc.ClientSets[i%numberOfClients],
dtc.InternalClientsets[i%numberOfClients],
extensions.Kind("DaemonSet"),
dtc.DaemonConfigs[i].Namespace,
dtc.DaemonConfigs[i].Name,
@@ -166,9 +169,9 @@ func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceC
} else {
if numNodes <= 100 {
apiserverCPU = 1.8
apiserverMem = 1500 * (1024 * 1024)
controllerCPU = 0.5
controllerMem = 500 * (1024 * 1024)
apiserverMem = 1700 * (1024 * 1024)
controllerCPU = 0.6
controllerMem = 530 * (1024 * 1024)
schedulerCPU = 0.4
schedulerMem = 180 * (1024 * 1024)
}
@@ -219,30 +222,60 @@ func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceC
return constraints
}
func logPodStartupStatus(c clientset.Interface, expectedPods int, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) {
func computeAverage(sample []float64) float64 {
sum := 0.0
for _, value := range sample {
sum += value
}
return sum / float64(len(sample))
}
func computeQuantile(sample []float64, quantile float64) float64 {
Expect(sort.Float64sAreSorted(sample)).To(Equal(true))
Expect(quantile >= 0.0 && quantile <= 1.0).To(Equal(true))
index := int(quantile*float64(len(sample))) - 1
if index < 0 {
return math.NaN()
}
return sample[index]
}
func logPodStartupStatus(
c clientset.Interface,
expectedPods int,
observedLabels map[string]string,
period time.Duration,
scheduleThroughputs *[]float64,
stopCh chan struct{}) {
label := labels.SelectorFromSet(labels.Set(observedLabels))
podStore := testutils.NewPodStore(c, metav1.NamespaceAll, label, fields.Everything())
podStore, err := testutils.NewPodStore(c, metav1.NamespaceAll, label, fields.Everything())
framework.ExpectNoError(err)
defer podStore.Stop()
ticker := time.NewTicker(period)
startupStatus := testutils.ComputeRCStartupStatus(podStore.List(), expectedPods)
lastScheduledCount := startupStatus.Scheduled
defer ticker.Stop()
for {
select {
case <-ticker.C:
pods := podStore.List()
startupStatus := testutils.ComputeRCStartupStatus(pods, expectedPods)
framework.Logf(startupStatus.String("Density"))
case <-stopCh:
pods := podStore.List()
startupStatus := testutils.ComputeRCStartupStatus(pods, expectedPods)
framework.Logf(startupStatus.String("Density"))
return
}
// Log status of the pods.
startupStatus := testutils.ComputeRCStartupStatus(podStore.List(), expectedPods)
framework.Logf(startupStatus.String("Density"))
// Compute scheduling throughput for the latest time period.
throughput := float64(startupStatus.Scheduled-lastScheduledCount) / float64(period/time.Second)
*scheduleThroughputs = append(*scheduleThroughputs, throughput)
lastScheduledCount = startupStatus.Scheduled
}
}
// runDensityTest will perform a density test and return the time it took for
// all pods to start
func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer) time.Duration {
func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer, scheduleThroughputs *[]float64) time.Duration {
defer GinkgoRecover()
// Create all secrets, configmaps and daemons.
@@ -267,9 +300,9 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi
}()
}
logStopCh := make(chan struct{})
go logPodStartupStatus(dtc.ClientSets[0], dtc.PodCount, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
go logPodStartupStatus(dtc.ClientSets[0], dtc.PodCount, map[string]string{"type": "densityPod"}, dtc.PollInterval, scheduleThroughputs, logStopCh)
wg.Wait()
startupTime := time.Now().Sub(startTime)
startupTime := time.Since(startTime)
close(logStopCh)
framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
@@ -313,15 +346,9 @@ func cleanupDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPha
name := dtc.Configs[i].GetName()
namespace := dtc.Configs[i].GetNamespace()
kind := dtc.Configs[i].GetKind()
if framework.TestContext.GarbageCollectorEnabled && kindSupportsGarbageCollector(kind) {
By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind))
err := framework.DeleteResourceAndWaitForGC(dtc.ClientSets[i%numberOfClients], kind, namespace, name)
framework.ExpectNoError(err)
} else {
By(fmt.Sprintf("Cleaning up the %v and pods", kind))
err := framework.DeleteResourceAndPods(dtc.ClientSets[i%numberOfClients], dtc.InternalClientsets[i%numberOfClients], kind, namespace, name)
framework.ExpectNoError(err)
}
By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind))
err := framework.DeleteResourceAndWaitForGC(dtc.ClientSets[i%numberOfClients], kind, namespace, name)
framework.ExpectNoError(err)
}
podCleanupPhase.End()
@@ -348,6 +375,7 @@ var _ = SIGDescribe("Density", func() {
var nodeCpuCapacity int64
var nodeMemCapacity int64
var nodes *v1.NodeList
var scheduleThroughputs []float64
testCaseBaseName := "density"
missingMeasurements := 0
@@ -382,21 +410,30 @@ var _ = SIGDescribe("Density", func() {
framework.ExpectNoError(err)
if err == nil {
summaries = append(summaries, metrics)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
}
// Verify scheduler metrics.
// TODO: Reset metrics at the beginning of the test.
// We should do something similar to how we do it for APIserver.
latency, err := framework.VerifySchedulerLatency(c)
framework.ExpectNoError(err)
if err == nil {
// Compute avg and quantiles of throughput (excluding last element, that's usually an outlier).
sampleSize := len(scheduleThroughputs)
if sampleSize > 1 {
scheduleThroughputs = scheduleThroughputs[:sampleSize-1]
sort.Float64s(scheduleThroughputs)
latency.ThroughputAverage = computeAverage(scheduleThroughputs)
latency.ThroughputPerc50 = computeQuantile(scheduleThroughputs, 0.5)
latency.ThroughputPerc90 = computeQuantile(scheduleThroughputs, 0.9)
latency.ThroughputPerc99 = computeQuantile(scheduleThroughputs, 0.99)
}
summaries = append(summaries, latency)
}
summaries = append(summaries, testPhaseDurations)
framework.PrintSummaries(summaries, testCaseBaseName)
// Fail if there were some high-latency requests.
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
// Fail if more than the allowed threshold of measurements were missing in the latencyTest.
Expect(missingMeasurements <= MaxMissingPodStartupMeasurements).To(Equal(true))
})
@@ -430,6 +467,7 @@ var _ = SIGDescribe("Density", func() {
uuid = string(utiluuid.NewUUID())
framework.ExpectNoError(framework.ResetSchedulerMetrics(c))
framework.ExpectNoError(framework.ResetMetrics(c))
framework.ExpectNoError(os.Mkdir(fmt.Sprintf(framework.TestContext.OutputDir+"/%s", uuid), 0777))
@@ -538,8 +576,7 @@ var _ = SIGDescribe("Density", func() {
namespaces, err := CreateNamespaces(f, numberOfCollections, fmt.Sprintf("density-%v", testArg.podsPerNode), testPhaseDurations.StartPhase(200, "namespace creation"))
framework.ExpectNoError(err)
if itArg.quotas {
err := CreateQuotas(f, namespaces, totalPods+nodeCount, testPhaseDurations.StartPhase(210, "quota creation"))
framework.ExpectNoError(err)
framework.ExpectNoError(CreateQuotas(f, namespaces, totalPods+nodeCount, testPhaseDurations.StartPhase(210, "quota creation")))
}
configs := make([]testutils.RunObjectConfig, numberOfCollections)
@@ -582,7 +619,7 @@ var _ = SIGDescribe("Density", func() {
Client: clients[i],
InternalClient: internalClients[i],
ScalesGetter: scalesClients[i],
Image: framework.GetPauseImageName(f.ClientSet),
Image: imageutils.GetPauseImageName(),
Name: name,
Namespace: nsName,
Labels: map[string]string{"type": "densityPod"},
@@ -613,11 +650,12 @@ var _ = SIGDescribe("Density", func() {
}
// Single client is running out of http2 connections in delete phase, hence we need more.
clients, internalClients, _, err = createClients(2)
clients, internalClients, scalesClients, err = createClients(2)
dConfig := DensityTestConfig{
ClientSets: clients,
InternalClientsets: internalClients,
ScaleClients: scalesClients,
Configs: configs,
PodCount: totalPods,
PollInterval: DensityPollInterval,
@@ -635,7 +673,7 @@ var _ = SIGDescribe("Density", func() {
LogFunc: framework.Logf,
})
}
e2eStartupTime = runDensityTest(dConfig, testPhaseDurations)
e2eStartupTime = runDensityTest(dConfig, testPhaseDurations, &scheduleThroughputs)
if itArg.runLatencyTest {
By("Scheduling additional Pods to measure startup latencies")
@@ -739,7 +777,7 @@ var _ = SIGDescribe("Density", func() {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
nsName := namespaces[i%len(namespaces)].Name
rcNameToNsMap[name] = nsName
go createRunningPodFromRC(&wg, c, name, nsName, framework.GetPauseImageName(f.ClientSet), additionalPodsPrefix, cpuRequest, memRequest)
go createRunningPodFromRC(&wg, c, name, nsName, imageutils.GetPauseImageName(), additionalPodsPrefix, cpuRequest, memRequest)
time.Sleep(200 * time.Millisecond)
}
wg.Wait()
@@ -830,16 +868,29 @@ var _ = SIGDescribe("Density", func() {
sort.Sort(framework.LatencySlice(schedToWatchLag))
sort.Sort(framework.LatencySlice(e2eLag))
framework.PrintLatencies(scheduleLag, "worst schedule latencies")
framework.PrintLatencies(startupLag, "worst run-after-schedule latencies")
framework.PrintLatencies(watchLag, "worst watch latencies")
framework.PrintLatencies(schedToWatchLag, "worst scheduled-to-end total latencies")
framework.PrintLatencies(e2eLag, "worst e2e total latencies")
framework.PrintLatencies(scheduleLag, "worst create-to-schedule latencies")
framework.PrintLatencies(startupLag, "worst schedule-to-run latencies")
framework.PrintLatencies(watchLag, "worst run-to-watch latencies")
framework.PrintLatencies(schedToWatchLag, "worst schedule-to-watch latencies")
framework.PrintLatencies(e2eLag, "worst e2e latencies")
// Capture latency metrics related to pod-startup.
podStartupLatency := &framework.PodStartupLatency{
CreateToScheduleLatency: framework.ExtractLatencyMetrics(scheduleLag),
ScheduleToRunLatency: framework.ExtractLatencyMetrics(startupLag),
RunToWatchLatency: framework.ExtractLatencyMetrics(watchLag),
ScheduleToWatchLatency: framework.ExtractLatencyMetrics(schedToWatchLag),
E2ELatency: framework.ExtractLatencyMetrics(e2eLag),
}
f.TestSummaries = append(f.TestSummaries, podStartupLatency)
// Test whether e2e pod startup time is acceptable.
podStartupLatency := &framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLag)}
f.TestSummaries = append(f.TestSummaries, podStartupLatency)
framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency))
podStartupLatencyThreshold := framework.LatencyMetric{
Perc50: PodStartupLatencyThreshold,
Perc90: PodStartupLatencyThreshold,
Perc99: PodStartupLatencyThreshold,
}
framework.ExpectNoError(framework.VerifyLatencyWithinThreshold(podStartupLatencyThreshold, podStartupLatency.E2ELatency, "pod startup"))
framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
latencyMeasurementPhase.End()
@@ -901,7 +952,3 @@ func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns,
framework.ExpectNoError(framework.WaitForControlledPodsRunning(c, ns, name, api.Kind("ReplicationController")))
framework.Logf("Found pod '%s' running", name)
}
func kindSupportsGarbageCollector(kind schema.GroupKind) bool {
return kind != extensions.Kind("Deployment") && kind != batch.Kind("Job")
}

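The pod-startup check above now applies an explicit per-percentile threshold (5 s for the 50th, 90th and 99th percentile alike) instead of the old framework.VerifyPodStartupLatency call. A hedged sketch of what such a check amounts to; the LatencyMetric mirror and verifyLatencyWithinThreshold below are local stand-ins for illustration, not the framework API itself:

```go
package main

import (
	"fmt"
	"time"
)

// LatencyMetric mirrors the three percentiles the diff reads from framework.LatencyMetric.
type LatencyMetric struct {
	Perc50 time.Duration
	Perc90 time.Duration
	Perc99 time.Duration
}

// verifyLatencyWithinThreshold is a local stand-in for framework.VerifyLatencyWithinThreshold:
// every observed percentile must stay at or below its threshold.
func verifyLatencyWithinThreshold(threshold, actual LatencyMetric, metricName string) error {
	if actual.Perc50 > threshold.Perc50 {
		return fmt.Errorf("too high %v latency 50th percentile: %v", metricName, actual.Perc50)
	}
	if actual.Perc90 > threshold.Perc90 {
		return fmt.Errorf("too high %v latency 90th percentile: %v", metricName, actual.Perc90)
	}
	if actual.Perc99 > threshold.Perc99 {
		return fmt.Errorf("too high %v latency 99th percentile: %v", metricName, actual.Perc99)
	}
	return nil
}

func main() {
	const podStartupLatencyThreshold = 5 * time.Second

	threshold := LatencyMetric{
		Perc50: podStartupLatencyThreshold,
		Perc90: podStartupLatencyThreshold,
		Perc99: podStartupLatencyThreshold,
	}
	// Hypothetical measured e2e pod-startup latencies.
	observed := LatencyMetric{
		Perc50: 1200 * time.Millisecond,
		Perc90: 2500 * time.Millisecond,
		Perc99: 4800 * time.Millisecond,
	}

	if err := verifyLatencyWithinThreshold(threshold, observed, "pod startup"); err != nil {
		fmt.Println("FAIL:", err)
		return
	}
	fmt.Println("pod startup latency within threshold")
}
```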
View File

@@ -22,6 +22,7 @@ import (
"k8s.io/api/core/v1"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
)
@@ -42,7 +43,7 @@ var _ = SIGDescribe("Empty [Feature:Empty]", func() {
})
It("starts a pod", func() {
configs, _, _ := GenerateConfigsForGroup([]*v1.Namespace{f.Namespace}, "empty-pod", 1, 1, framework.GetPauseImageName(f.ClientSet), []string{}, api.Kind("ReplicationController"), 0, 0)
configs, _, _ := GenerateConfigsForGroup([]*v1.Namespace{f.Namespace}, "empty-pod", 1, 1, imageutils.GetPauseImageName(), []string{}, api.Kind("ReplicationController"), 0, 0)
if len(configs) != 1 {
framework.Failf("generateConfigs should have generated single config")
}

View File

@@ -28,7 +28,6 @@ import (
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -54,6 +53,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/restmapper"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)
@@ -248,7 +248,7 @@ var _ = SIGDescribe("Load capacity", func() {
framework.Logf("Starting to delete services...")
deleteService := func(i int) {
defer GinkgoRecover()
framework.ExpectNoError(clientset.CoreV1().Services(services[i].Namespace).Delete(services[i].Name, nil))
framework.ExpectNoError(testutils.DeleteResourceWithRetries(clientset, api.Kind("Service"), services[i].Namespace, services[i].Name, nil))
}
workqueue.Parallelize(serviceOperationsParallelism, len(services), deleteService)
framework.Logf("Services deleted")
@@ -286,9 +286,8 @@ var _ = SIGDescribe("Load capacity", func() {
}
daemonConfig.Run()
defer func(config *testutils.DaemonConfig) {
framework.ExpectNoError(framework.DeleteResourceAndPods(
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(
f.ClientSet,
f.InternalClientset,
extensions.Kind("DaemonSet"),
config.Namespace,
config.Name,
@@ -372,10 +371,10 @@ func createClients(numberOfClients int) ([]clientset.Interface, []internalclient
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: tlsConfig,
MaxIdleConnsPerHost: 100,
Dial: (&net.Dialer{
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
}).DialContext,
})
// Overwrite TLS-related fields from config to avoid collision with
// Transport field.
@@ -409,7 +408,7 @@ func createClients(numberOfClients int) ([]clientset.Interface, []internalclient
return nil, nil, nil, err
}
cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient)
restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscoClient, meta.InterfacesForUnstructured)
restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient)
restMapper.Reset()
resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
scalesClients[i] = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)
@@ -648,7 +647,6 @@ func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scaling
newSize := uint(rand.Intn(config.GetReplicas()) + config.GetReplicas()/2)
framework.ExpectNoError(framework.ScaleResource(
config.GetClient(),
config.GetInternalClient(),
config.GetScalesGetter(),
config.GetNamespace(),
config.GetName(),
@@ -670,7 +668,7 @@ func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scaling
return true, nil
}
framework.Logf("Failed to list pods from %v %v due to: %v", config.GetKind(), config.GetName(), err)
if framework.IsRetryableAPIError(err) {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("Failed to list pods from %v %v with non-retriable error: %v", config.GetKind(), config.GetName(), err)
@@ -694,15 +692,9 @@ func deleteResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, deleti
defer wg.Done()
sleepUpTo(deletingTime)
if framework.TestContext.GarbageCollectorEnabled && config.GetKind() != extensions.Kind("Deployment") {
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(
config.GetClient(), config.GetKind(), config.GetNamespace(), config.GetName()),
fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName()))
} else {
framework.ExpectNoError(framework.DeleteResourceAndPods(
config.GetClient(), config.GetInternalClient(), config.GetKind(), config.GetNamespace(), config.GetName()),
fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName()))
}
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(
config.GetClient(), config.GetKind(), config.GetNamespace(), config.GetName()),
fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName()))
}
func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string, testPhase *timer.Phase) ([]*v1.Namespace, error) {
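
In createClients the transport's deprecated Dial field is replaced by DialContext, so a dial in progress is abandoned when the request's context is cancelled. A minimal sketch of that transport construction using only the standard library, with the timeout values from the diff; the TLS config is a placeholder, and the real code hands the transport to a Kubernetes rest.Config rather than a plain http.Client:

```go
package main

import (
	"crypto/tls"
	"net"
	"net/http"
	"time"
)

func main() {
	transport := &http.Transport{
		TLSHandshakeTimeout: 10 * time.Second,
		// Placeholder TLS config for the sketch; the e2e code copies the
		// TLS settings out of its rest.Config instead.
		TLSClientConfig:     &tls.Config{InsecureSkipVerify: true},
		MaxIdleConnsPerHost: 100,
		// DialContext (unlike the deprecated Dial) observes request contexts,
		// so cancelled requests do not leak blocked dials.
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
	}

	client := &http.Client{Transport: transport, Timeout: time.Minute}
	_ = client
}
```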