Mirror of https://github.com/ceph/ceph-csi.git

Commit: Fresh dep ensure
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/BUILD (generated, vendored, 20 lines changed)
@@ -14,12 +14,12 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/test/integration/scheduler_perf",
     deps = [
         "//pkg/scheduler:go_default_library",
         "//pkg/scheduler/algorithmprovider:go_default_library",
         "//pkg/scheduler/factory:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//test/integration/util:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/rest:go_default_library",
     ],
 )

@@ -35,14 +35,14 @@ go_test(
     tags = ["integration"],
     deps = [
         "//pkg/kubelet/apis:go_default_library",
         "//pkg/scheduler:go_default_library",
         "//pkg/scheduler/factory:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//test/integration/framework:go_default_library",
         "//test/utils:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
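A note on the glog to klog swap visible in these dependencies and in the Go diffs below: the logging call surface is essentially unchanged, so call sites only swap the import. A minimal sketch of the post-migration setup (illustrative only, not part of this commit):

package main

import (
    "flag"

    "k8s.io/klog"
)

func main() {
    // klog registers its flags explicitly, unlike glog which relied on package init.
    klog.InitFlags(flag.CommandLine)
    flag.Parse()
    defer klog.Flush()

    // Infof/Fatalf keep the same signatures, so migrated code only changes the import path.
    klog.Infof("scheduler_perf now logs through klog instead of glog")
}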
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/OWNERS (generated, vendored, 16 lines changed)
@@ -1,16 +1,4 @@
 approvers:
-- bsalamat
-- davidopp
-- gmarek
-- jayunit100
-- timothysc
-- wojtek-t
+- sig-scheduling-maintainers
 reviewers:
-- bsalamat
-- davidopp
-- jayunit100
-- k82cn
-- ravisantoshgudimetla
-- sjug
-- timothysc
-- wojtek-t
+- sig-scheduling
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_bench_test.go (generated, vendored, 138 lines changed)
@@ -28,7 +28,11 @@ import (
     "k8s.io/kubernetes/test/integration/framework"
     testutils "k8s.io/kubernetes/test/utils"
 
-    "github.com/golang/glog"
+    "k8s.io/klog"
 )
 
+var (
+    defaultNodeStrategy = &testutils.TrivialNodePrepareStrategy{}
+)
+
 // BenchmarkScheduling benchmarks the scheduling rate when the cluster has
@@ -45,41 +49,90 @@ func BenchmarkScheduling(b *testing.B) {
     for _, test := range tests {
         name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
         b.Run(name, func(b *testing.B) {
-            benchmarkScheduling(test.nodes, test.existingPods, test.minPods, setupStrategy, testStrategy, b)
+            benchmarkScheduling(test.nodes, test.existingPods, test.minPods, defaultNodeStrategy, setupStrategy, testStrategy, b)
         })
     }
 }
 
-// BenchmarkSchedulingAntiAffinity benchmarks the scheduling rate of pods with
+// BenchmarkSchedulingPodAntiAffinity benchmarks the scheduling rate of pods with
 // PodAntiAffinity rules when the cluster has various quantities of nodes and
 // scheduled pods.
-func BenchmarkSchedulingAntiAffinity(b *testing.B) {
+func BenchmarkSchedulingPodAntiAffinity(b *testing.B) {
     tests := []struct{ nodes, existingPods, minPods int }{
         {nodes: 500, existingPods: 250, minPods: 250},
         {nodes: 500, existingPods: 5000, minPods: 250},
         {nodes: 1000, existingPods: 1000, minPods: 500},
     }
     // The setup strategy creates pods with no affinity rules.
     setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("setup")
-    // The test strategy creates pods with anti-affinity for each other.
-    testBasePod := makeBasePodWithAntiAffinity(
+    testBasePod := makeBasePodWithPodAntiAffinity(
         map[string]string{"name": "test", "color": "green"},
         map[string]string{"color": "green"})
+    // The test strategy creates pods with anti-affinity for each other.
     testStrategy := testutils.NewCustomCreatePodStrategy(testBasePod)
     for _, test := range tests {
         name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
         b.Run(name, func(b *testing.B) {
-            benchmarkScheduling(test.nodes, test.existingPods, test.minPods, setupStrategy, testStrategy, b)
+            benchmarkScheduling(test.nodes, test.existingPods, test.minPods, defaultNodeStrategy, setupStrategy, testStrategy, b)
         })
     }
 }
 
-// makeBasePodWithAntiAffinity creates a Pod object to be used as a template.
+// BenchmarkSchedulingPodAffinity benchmarks the scheduling rate of pods with
+// PodAffinity rules when the cluster has various quantities of nodes and
+// scheduled pods.
+func BenchmarkSchedulingPodAffinity(b *testing.B) {
+    tests := []struct{ nodes, existingPods, minPods int }{
+        {nodes: 500, existingPods: 250, minPods: 250},
+        {nodes: 500, existingPods: 5000, minPods: 250},
+        {nodes: 1000, existingPods: 1000, minPods: 500},
+    }
+    // The setup strategy creates pods with no affinity rules.
+    setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("setup")
+    testBasePod := makeBasePodWithPodAffinity(
+        map[string]string{"foo": ""},
+        map[string]string{"foo": ""},
+    )
+    // The test strategy creates pods with affinity for each other.
+    testStrategy := testutils.NewCustomCreatePodStrategy(testBasePod)
+    nodeStrategy := testutils.NewLabelNodePrepareStrategy(apis.LabelZoneFailureDomain, "zone1")
+    for _, test := range tests {
+        name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
+        b.Run(name, func(b *testing.B) {
+            benchmarkScheduling(test.nodes, test.existingPods, test.minPods, nodeStrategy, setupStrategy, testStrategy, b)
+        })
+    }
+}
+
+// BenchmarkSchedulingNodeAffinity benchmarks the scheduling rate of pods with
+// NodeAffinity rules when the cluster has various quantities of nodes and
+// scheduled pods.
+func BenchmarkSchedulingNodeAffinity(b *testing.B) {
+    tests := []struct{ nodes, existingPods, minPods int }{
+        {nodes: 500, existingPods: 250, minPods: 250},
+        {nodes: 500, existingPods: 5000, minPods: 250},
+        {nodes: 1000, existingPods: 1000, minPods: 500},
+    }
+    // The setup strategy creates pods with no affinity rules.
+    setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("setup")
+    testBasePod := makeBasePodWithNodeAffinity(apis.LabelZoneFailureDomain, []string{"zone1", "zone2"})
+    // The test strategy creates pods with node-affinity for each other.
+    testStrategy := testutils.NewCustomCreatePodStrategy(testBasePod)
+    nodeStrategy := testutils.NewLabelNodePrepareStrategy(apis.LabelZoneFailureDomain, "zone1")
+    for _, test := range tests {
+        name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
+        b.Run(name, func(b *testing.B) {
+            benchmarkScheduling(test.nodes, test.existingPods, test.minPods, nodeStrategy, setupStrategy, testStrategy, b)
+        })
+    }
+}
+
+// makeBasePodWithPodAntiAffinity creates a Pod object to be used as a template.
 // The Pod has a PodAntiAffinity requirement against pods with the given labels.
-func makeBasePodWithAntiAffinity(podLabels, affinityLabels map[string]string) *v1.Pod {
+func makeBasePodWithPodAntiAffinity(podLabels, affinityLabels map[string]string) *v1.Pod {
     basePod := &v1.Pod{
         ObjectMeta: metav1.ObjectMeta{
-            GenerateName: "affinity-pod-",
+            GenerateName: "anit-affinity-pod-",
             Labels:       podLabels,
         },
         Spec: testutils.MakePodSpec(),
@@ -99,11 +152,66 @@ func makeBasePodWithAntiAffinity(podLabels, affinityLabels map[string]string) *v1.Pod {
     return basePod
 }
 
+// makeBasePodWithPodAffinity creates a Pod object to be used as a template.
+// The Pod has a PodAffinity requirement against pods with the given labels.
+func makeBasePodWithPodAffinity(podLabels, affinityZoneLabels map[string]string) *v1.Pod {
+    basePod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            GenerateName: "affinity-pod-",
+            Labels:       podLabels,
+        },
+        Spec: testutils.MakePodSpec(),
+    }
+    basePod.Spec.Affinity = &v1.Affinity{
+        PodAffinity: &v1.PodAffinity{
+            RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
+                {
+                    LabelSelector: &metav1.LabelSelector{
+                        MatchLabels: affinityZoneLabels,
+                    },
+                    TopologyKey: apis.LabelZoneFailureDomain,
+                },
+            },
+        },
+    }
+    return basePod
+}
+
+// makeBasePodWithNodeAffinity creates a Pod object to be used as a template.
+// The Pod has a NodeAffinity requirement against nodes with the given expressions.
+func makeBasePodWithNodeAffinity(key string, vals []string) *v1.Pod {
+    basePod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            GenerateName: "node-affinity-",
+        },
+        Spec: testutils.MakePodSpec(),
+    }
+    basePod.Spec.Affinity = &v1.Affinity{
+        NodeAffinity: &v1.NodeAffinity{
+            RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
+                NodeSelectorTerms: []v1.NodeSelectorTerm{
+                    {
+                        MatchExpressions: []v1.NodeSelectorRequirement{
+                            {
+                                Key:      key,
+                                Operator: v1.NodeSelectorOpIn,
+                                Values:   vals,
+                            },
+                        },
+                    },
+                },
+            },
+        },
+    }
+    return basePod
+}
+
 // benchmarkScheduling benchmarks scheduling rate with specific number of nodes
 // and specific number of pods already scheduled.
 // This will schedule numExistingPods pods before the benchmark starts, and at
 // least minPods pods during the benchmark.
 func benchmarkScheduling(numNodes, numExistingPods, minPods int,
+    nodeStrategy testutils.PrepareNodeStrategy,
     setupPodStrategy, testPodStrategy testutils.TestPodCreateStrategy,
     b *testing.B) {
     if b.N < minPods {
@@ -115,11 +223,11 @@ func benchmarkScheduling(numNodes, numExistingPods, minPods int,
 
     nodePreparer := framework.NewIntegrationTestNodePreparer(
         c,
-        []testutils.CountToStrategy{{Count: numNodes, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
+        []testutils.CountToStrategy{{Count: numNodes, Strategy: nodeStrategy}},
         "scheduler-perf-",
     )
     if err := nodePreparer.PrepareNodes(); err != nil {
-        glog.Fatalf("%v", err)
+        klog.Fatalf("%v", err)
     }
     defer nodePreparer.CleanupNodes()
 
@@ -131,7 +239,7 @@ func benchmarkScheduling(numNodes, numExistingPods, minPods int,
     for {
         scheduled, err := schedulerConfigFactory.GetScheduledPodLister().List(labels.Everything())
         if err != nil {
-            glog.Fatalf("%v", err)
+            klog.Fatalf("%v", err)
         }
         if len(scheduled) >= numExistingPods {
             break
@@ -149,7 +257,7 @@ func benchmarkScheduling(numNodes, numExistingPods, minPods int,
         // TODO: Setup watch on apiserver and wait until all pods scheduled.
         scheduled, err := schedulerConfigFactory.GetScheduledPodLister().List(labels.Everything())
         if err != nil {
-            glog.Fatalf("%v", err)
+            klog.Fatalf("%v", err)
         }
         if len(scheduled) >= numExistingPods+b.N {
             break
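The new nodeStrategy parameter threaded through benchmarkScheduling lets each benchmark decide how nodes are prepared (for example, labeling them with a failure-domain zone) instead of always using TrivialNodePrepareStrategy. A hypothetical wrapper showing how a further benchmark could wire this up (it reuses identifiers from the diff above and is illustrative, not part of the vendored code):

// benchmarkZoneSpread is a hypothetical example, not part of this commit.
// It prepares every node with the failure-domain zone label and schedules
// pods whose NodeAffinity requires that label, mirroring
// BenchmarkSchedulingNodeAffinity above.
func benchmarkZoneSpread(b *testing.B) {
    // Label all benchmark nodes as zone1 before the measured phase starts.
    nodeStrategy := testutils.NewLabelNodePrepareStrategy(apis.LabelZoneFailureDomain, "zone1")
    // Pods created during setup carry no affinity rules.
    setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("setup")
    // Pods created during the measured phase require the zone label.
    testStrategy := testutils.NewCustomCreatePodStrategy(
        makeBasePodWithNodeAffinity(apis.LabelZoneFailureDomain, []string{"zone1"}))
    benchmarkScheduling(500, 250, 250, nodeStrategy, setupStrategy, testStrategy, b)
}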
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_test.go (generated, vendored, 16 lines changed)
@@ -18,12 +18,12 @@ package benchmark
 
 import (
     "fmt"
-    "github.com/golang/glog"
     "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
-    "k8s.io/kubernetes/pkg/scheduler"
+    "k8s.io/klog"
+    "k8s.io/kubernetes/pkg/scheduler/factory"
     testutils "k8s.io/kubernetes/test/utils"
     "math"
     "strconv"
@@ -105,7 +105,7 @@ type testConfig struct {
     numNodes                  int
     mutatedNodeTemplate       *v1.Node
     mutatedPodTemplate        *v1.Pod
-    schedulerSupportFunctions scheduler.Configurator
+    schedulerSupportFunctions factory.Configurator
     destroyFunc               func()
 }
 
@@ -137,7 +137,7 @@ func schedulePods(config *testConfig) int32 {
         time.Sleep(50 * time.Millisecond)
         scheduled, err := config.schedulerSupportFunctions.GetScheduledPodLister().List(labels.Everything())
         if err != nil {
-            glog.Fatalf("%v", err)
+            klog.Fatalf("%v", err)
         }
         // 30,000 pods -> wait till @ least 300 are scheduled to start measuring.
         // TODO Find out why sometimes there may be scheduling blips in the beginning.
@@ -155,15 +155,19 @@ func schedulePods(config *testConfig) int32 {
         // TODO: Setup watch on apiserver and wait until all pods scheduled.
         scheduled, err := config.schedulerSupportFunctions.GetScheduledPodLister().List(labels.Everything())
         if err != nil {
-            glog.Fatalf("%v", err)
+            klog.Fatalf("%v", err)
         }
 
         // We will be completed when all pods are done being scheduled.
         // return the worst-case-scenario interval that was seen during this time.
         // Note this should never be low due to cold-start, so allow bake in sched time if necessary.
         if len(scheduled) >= config.numPods {
+            consumed := int(time.Since(start) / time.Second)
+            if consumed <= 0 {
+                consumed = 1
+            }
             fmt.Printf("Scheduled %v Pods in %v seconds (%v per second on average). min QPS was %v\n",
-                config.numPods, int(time.Since(start)/time.Second), config.numPods/int(time.Since(start)/time.Second), minQps)
+                config.numPods, consumed, config.numPods/consumed, minQps)
             return minQps
         }
 
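The consumed guard added above prevents an integer division by zero when the measured run completes in under a second. A small self-contained sketch of the failure mode and the fix (illustrative, not part of the vendored code):

package main

import (
    "fmt"
    "time"
)

func main() {
    // Pretend the measured run finished in 300ms.
    start := time.Now().Add(-300 * time.Millisecond)

    // Integer division of the elapsed time by time.Second truncates to 0 here,
    // so numPods/consumed would panic without the guard below.
    consumed := int(time.Since(start) / time.Second)
    if consumed <= 0 {
        consumed = 1
    }

    numPods := 3000
    fmt.Printf("Scheduled %v Pods in %v seconds (%v per second on average)\n",
        numPods, consumed, numPods/consumed)
}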
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/util.go (generated, vendored, 4 lines changed)
@@ -20,8 +20,8 @@ import (
     "k8s.io/apimachinery/pkg/runtime/schema"
     clientset "k8s.io/client-go/kubernetes"
     restclient "k8s.io/client-go/rest"
-    "k8s.io/kubernetes/pkg/scheduler"
     _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
+    "k8s.io/kubernetes/pkg/scheduler/factory"
     "k8s.io/kubernetes/test/integration/util"
 )
 
@@ -32,7 +32,7 @@
 // remove resources after finished.
 // Notes on rate limiter:
 // - client rate limit is set to 5000.
-func mustSetupScheduler() (scheduler.Configurator, util.ShutdownFunc) {
+func mustSetupScheduler() (factory.Configurator, util.ShutdownFunc) {
     apiURL, apiShutdown := util.StartApiserver()
     clientSet := clientset.NewForConfigOrDie(&restclient.Config{
         Host: apiURL,
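The return-type change from scheduler.Configurator to factory.Configurator only affects callers inside this benchmark package, which keep using the same lister accessor. A hypothetical in-package helper sketching that call pattern (identifiers are taken from the diffs above; the helper itself is an illustration and assumes the package already imports labels, klog, and time):

// waitForScheduledPods is a hypothetical helper, not part of this commit.
// It shows how the benchmarks poll the configurator returned by
// mustSetupScheduler after the type change.
func waitForScheduledPods(want int) {
    schedulerConfigFactory, shutdown := mustSetupScheduler() // now a factory.Configurator
    defer shutdown()
    for {
        scheduled, err := schedulerConfigFactory.GetScheduledPodLister().List(labels.Everything())
        if err != nil {
            klog.Fatalf("%v", err)
        }
        if len(scheduled) >= want {
            return
        }
        time.Sleep(50 * time.Millisecond)
    }
}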