vendor updates

Serguei Bezverkhi
2018-03-06 17:33:18 -05:00
parent 4b3ebc171b
commit e9033989a0
5854 changed files with 248382 additions and 119809 deletions

test/integration/scheduler_perf/BUILD

@@ -14,19 +14,13 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/test/integration/scheduler_perf",
     deps = [
-        "//pkg/api/legacyscheme:go_default_library",
         "//pkg/api/testapi:go_default_library",
-        "//plugin/pkg/scheduler:go_default_library",
-        "//plugin/pkg/scheduler/algorithmprovider:go_default_library",
-        "//plugin/pkg/scheduler/factory:go_default_library",
-        "//test/integration/framework:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//pkg/scheduler:go_default_library",
+        "//pkg/scheduler/algorithmprovider:go_default_library",
+        "//test/integration/util:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/informers:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
         "//vendor/k8s.io/client-go/rest:go_default_library",
-        "//vendor/k8s.io/client-go/tools/record:go_default_library",
     ],
 )
@@ -38,11 +32,11 @@ go_test(
         "scheduler_bench_test.go",
         "scheduler_test.go",
     ],
-    importpath = "k8s.io/kubernetes/test/integration/scheduler_perf",
-    library = ":go_default_library",
+    embed = [":go_default_library"],
    tags = ["integration"],
     deps = [
-        "//plugin/pkg/scheduler:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
+        "//pkg/scheduler:go_default_library",
         "//test/integration/framework:go_default_library",
         "//test/utils:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",

test/integration/scheduler_perf/scheduler_bench_test.go

@@ -17,44 +17,98 @@ limitations under the License.
 package benchmark
 
 import (
+    "fmt"
     "testing"
     "time"
 
+    "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
+    "k8s.io/kubernetes/pkg/kubelet/apis"
     "k8s.io/kubernetes/test/integration/framework"
     testutils "k8s.io/kubernetes/test/utils"
 
     "github.com/golang/glog"
 )
 
-// BenchmarkScheduling100Nodes0Pods benchmarks the scheduling rate
-// when the cluster has 100 nodes and 0 scheduled pods
-func BenchmarkScheduling100Nodes0Pods(b *testing.B) {
-    benchmarkScheduling(100, 0, b)
+// BenchmarkScheduling benchmarks the scheduling rate when the cluster has
+// various quantities of nodes and scheduled pods.
+func BenchmarkScheduling(b *testing.B) {
+    tests := []struct{ nodes, existingPods, minPods int }{
+        {nodes: 100, existingPods: 0, minPods: 100},
+        {nodes: 100, existingPods: 1000, minPods: 100},
+        {nodes: 1000, existingPods: 0, minPods: 100},
+        {nodes: 1000, existingPods: 1000, minPods: 100},
+    }
+    setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("rc1")
+    testStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("rc2")
+    for _, test := range tests {
+        name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
+        b.Run(name, func(b *testing.B) {
+            benchmarkScheduling(test.nodes, test.existingPods, test.minPods, setupStrategy, testStrategy, b)
+        })
+    }
 }
 
-// BenchmarkScheduling100Nodes1000Pods benchmarks the scheduling rate
-// when the cluster has 100 nodes and 1000 scheduled pods
-func BenchmarkScheduling100Nodes1000Pods(b *testing.B) {
-    benchmarkScheduling(100, 1000, b)
+// BenchmarkSchedulingAntiAffinity benchmarks the scheduling rate of pods with
+// PodAntiAffinity rules when the cluster has various quantities of nodes and
+// scheduled pods.
+func BenchmarkSchedulingAntiAffinity(b *testing.B) {
+    tests := []struct{ nodes, existingPods, minPods int }{
+        {nodes: 500, existingPods: 250, minPods: 250},
+        {nodes: 500, existingPods: 5000, minPods: 250},
+    }
+    // The setup strategy creates pods with no affinity rules.
+    setupStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("setup")
+    // The test strategy creates pods with anti-affinity for each other.
+    testBasePod := makeBasePodWithAntiAffinity(
+        map[string]string{"name": "test", "color": "green"},
+        map[string]string{"color": "green"})
+    testStrategy := testutils.NewCustomCreatePodStrategy(testBasePod)
+    for _, test := range tests {
+        name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
+        b.Run(name, func(b *testing.B) {
+            benchmarkScheduling(test.nodes, test.existingPods, test.minPods, setupStrategy, testStrategy, b)
+        })
+    }
 }
 
-// BenchmarkScheduling1000Nodes0Pods benchmarks the scheduling rate
-// when the cluster has 1000 nodes and 0 scheduled pods
-func BenchmarkScheduling1000Nodes0Pods(b *testing.B) {
-    benchmarkScheduling(1000, 0, b)
-}
-
-// BenchmarkScheduling1000Nodes1000Pods benchmarks the scheduling rate
-// when the cluster has 1000 nodes and 1000 scheduled pods
-func BenchmarkScheduling1000Nodes1000Pods(b *testing.B) {
-    benchmarkScheduling(1000, 1000, b)
+// makeBasePodWithAntiAffinity creates a Pod object to be used as a template.
+// The Pod has a PodAntiAffinity requirement against pods with the given labels.
+func makeBasePodWithAntiAffinity(podLabels, affinityLabels map[string]string) *v1.Pod {
+    basePod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            GenerateName: "affinity-pod-",
+            Labels:       podLabels,
+        },
+        Spec: testutils.MakePodSpec(),
+    }
+    basePod.Spec.Affinity = &v1.Affinity{
+        PodAntiAffinity: &v1.PodAntiAffinity{
+            RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
+                {
+                    LabelSelector: &metav1.LabelSelector{
+                        MatchLabels: affinityLabels,
+                    },
+                    TopologyKey: apis.LabelHostname,
+                },
+            },
+        },
+    }
+    return basePod
 }
 
 // benchmarkScheduling benchmarks scheduling rate with specific number of nodes
-// and specific number of pods already scheduled. Since an operation takes relatively
-// long time, b.N should be small: 10 - 100.
-func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
+// and specific number of pods already scheduled.
+// This will schedule numExistingPods pods before the benchmark starts, and at
+// least minPods pods during the benchmark.
+func benchmarkScheduling(numNodes, numExistingPods, minPods int,
+    setupPodStrategy, testPodStrategy testutils.TestPodCreateStrategy,
+    b *testing.B) {
+    if b.N < minPods {
+        b.N = minPods
+    }
     schedulerConfigFactory, finalFunc := mustSetupScheduler()
     defer finalFunc()
     c := schedulerConfigFactory.GetClient()
@@ -70,7 +124,7 @@ func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
     defer nodePreparer.CleanupNodes()
 
     config := testutils.NewTestPodCreatorConfig()
-    config.AddStrategy("sched-test", numScheduledPods, testutils.NewSimpleWithControllerCreatePodStrategy("rc1"))
+    config.AddStrategy("sched-test", numExistingPods, setupPodStrategy)
     podCreator := testutils.NewTestPodCreator(c, config)
     podCreator.CreatePods()
@@ -79,7 +133,7 @@ func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
         if err != nil {
             glog.Fatalf("%v", err)
         }
-        if len(scheduled) >= numScheduledPods {
+        if len(scheduled) >= numExistingPods {
             break
         }
         time.Sleep(1 * time.Second)
@@ -87,7 +141,7 @@ func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
     // start benchmark
     b.ResetTimer()
     config = testutils.NewTestPodCreatorConfig()
-    config.AddStrategy("sched-test", b.N, testutils.NewSimpleWithControllerCreatePodStrategy("rc2"))
+    config.AddStrategy("sched-test", b.N, testPodStrategy)
     podCreator = testutils.NewTestPodCreator(c, config)
     podCreator.CreatePods()
     for {
@@ -97,7 +151,7 @@ func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
         if err != nil {
            glog.Fatalf("%v", err)
         }
-        if len(scheduled) >= numScheduledPods+b.N {
+        if len(scheduled) >= numExistingPods+b.N {
             break
         }
         // Note: This might introduce slight deviation in accuracy of benchmark results.
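
The restructuring above replaces the four exported per-configuration Benchmark functions with a single table-driven benchmark that registers each configuration as a named sub-benchmark, and raises b.N to minPods so every run schedules enough pods to give a meaningful measurement. A minimal, self-contained sketch of that sub-benchmark pattern (illustrative names only, not code from the Kubernetes tree):

    package benchmark

    import (
        "fmt"
        "testing"
    )

    // BenchmarkExample registers one named sub-benchmark per table entry.
    // Each entry gets its own b.N and is reported separately, so a single
    // configuration can be selected with the standard -bench regex,
    // e.g. -bench 'BenchmarkExample/1000Nodes'.
    func BenchmarkExample(b *testing.B) {
        tests := []struct{ nodes, existingPods int }{
            {nodes: 100, existingPods: 0},
            {nodes: 1000, existingPods: 1000},
        }
        for _, test := range tests {
            name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
            b.Run(name, func(b *testing.B) {
                for i := 0; i < b.N; i++ {
                    _ = test.nodes + test.existingPods // stand-in for the measured scheduling work
                }
            })
        }
    }

One consequence of the required anti-affinity term in BenchmarkSchedulingAntiAffinity: every test pod carries the color=green label and repels that same label on the kubernetes.io/hostname topology, so at most one test pod can land on each node, which is why those cases pair 500 nodes with a minimum of 250 scheduled pods.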

test/integration/scheduler_perf/scheduler_test.go

@@ -23,7 +23,7 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
-    "k8s.io/kubernetes/plugin/pkg/scheduler"
+    "k8s.io/kubernetes/pkg/scheduler"
     testutils "k8s.io/kubernetes/test/utils"
     "math"
     "strconv"
@@ -144,7 +144,7 @@ func schedulePods(config *testConfig) int32 {
             glog.Fatalf("%v", err)
         }
         // 30,000 pods -> wait till @ least 300 are scheduled to start measuring.
-        // TODO Find out why sometimes there may be scheduling blips in the beggining.
+        // TODO Find out why sometimes there may be scheduling blips in the beginning.
         if len(scheduled) > config.numPods/100 {
             break
         }
@@ -258,7 +258,7 @@ func (inputConfig *schedulerPerfConfig) generatePodAndNodeTopology(config *testC
 }
 
 // writePodAndNodeTopologyToConfig reads a configuration and then applies it to a test configuration.
-//TODO: As of now, this function is not doing anything expect for reading input values to priority structs.
+//TODO: As of now, this function is not doing anything except for reading input values to priority structs.
 func writePodAndNodeTopologyToConfig(config *testConfig) error {
     // High Level structure that should be filled for every predicate or priority.
     inputConfig := &schedulerPerfConfig{

test/integration/scheduler_perf/util.go

@@ -17,26 +17,15 @@ limitations under the License.
 package benchmark
 
 import (
-    "net/http"
-    "net/http/httptest"
-
-    "github.com/golang/glog"
     "k8s.io/api/core/v1"
-    "k8s.io/client-go/informers"
     clientset "k8s.io/client-go/kubernetes"
-    clientv1core "k8s.io/client-go/kubernetes/typed/core/v1"
     restclient "k8s.io/client-go/rest"
-    "k8s.io/client-go/tools/record"
-    "k8s.io/kubernetes/pkg/api/legacyscheme"
     "k8s.io/kubernetes/pkg/api/testapi"
-    "k8s.io/kubernetes/plugin/pkg/scheduler"
-    _ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
-    "k8s.io/kubernetes/plugin/pkg/scheduler/factory"
-    "k8s.io/kubernetes/test/integration/framework"
+    "k8s.io/kubernetes/pkg/scheduler"
+    _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
+    "k8s.io/kubernetes/test/integration/util"
 )
 
 const enableEquivalenceCache = true
 
 // mustSetupScheduler starts the following components:
 // - k8s api server (a.k.a. master)
 // - scheduler
@@ -44,63 +33,19 @@ const enableEquivalenceCache = true
 // remove resources after finished.
 // Notes on rate limiter:
 // - client rate limit is set to 5000.
-func mustSetupScheduler() (schedulerConfigurator scheduler.Configurator, destroyFunc func()) {
-    h := &framework.MasterHolder{Initialized: make(chan struct{})}
-    s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-        <-h.Initialized
-        h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
-    }))
-    framework.RunAMasterUsingServer(framework.NewIntegrationTestMasterConfig(), s, h)
+func mustSetupScheduler() (scheduler.Configurator, util.ShutdownFunc) {
+    apiURL, apiShutdown := util.StartApiserver()
     clientSet := clientset.NewForConfigOrDie(&restclient.Config{
-        Host:          s.URL,
+        Host:          apiURL,
         ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()},
         QPS:           5000.0,
         Burst:         5000,
     })
+    schedulerConfig, schedulerShutdown := util.StartScheduler(clientSet, true)
 
-    informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
-    schedulerConfigurator = factory.NewConfigFactory(
-        v1.DefaultSchedulerName,
-        clientSet,
-        informerFactory.Core().V1().Nodes(),
-        informerFactory.Core().V1().Pods(),
-        informerFactory.Core().V1().PersistentVolumes(),
-        informerFactory.Core().V1().PersistentVolumeClaims(),
-        informerFactory.Core().V1().ReplicationControllers(),
-        informerFactory.Extensions().V1beta1().ReplicaSets(),
-        informerFactory.Apps().V1beta1().StatefulSets(),
-        informerFactory.Core().V1().Services(),
-        informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
-        informerFactory.Storage().V1().StorageClasses(),
-        v1.DefaultHardPodAffinitySymmetricWeight,
-        enableEquivalenceCache,
-    )
-
-    eventBroadcaster := record.NewBroadcaster()
-    eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})
-    sched, err := scheduler.NewFromConfigurator(schedulerConfigurator, func(conf *scheduler.Config) {
-        conf.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "scheduler"})
-    })
-    if err != nil {
-        glog.Fatalf("Error creating scheduler: %v", err)
+    shutdownFunc := func() {
+        schedulerShutdown()
+        apiShutdown()
     }
-
-    stop := make(chan struct{})
-    informerFactory.Start(stop)
-
-    sched.Run()
-
-    destroyFunc = func() {
-        glog.Infof("destroying")
-        sched.StopEverything()
-        close(stop)
-        s.Close()
-        glog.Infof("destroyed")
-    }
-    return
+    return schedulerConfig, shutdownFunc
 }
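
The new mustSetupScheduler delegates all of the former inline wiring (test master, informers, event broadcaster, scheduler construction) to the shared test/integration/util helpers and only composes their teardown callbacks. A minimal sketch of that start/teardown composition pattern, with hypothetical stand-ins rather than the real helpers:

    package main

    import "fmt"

    // ShutdownFunc mirrors the teardown callbacks returned by the helpers.
    type ShutdownFunc func()

    func startAPIServer() (url string, shutdown ShutdownFunc) {
        fmt.Println("apiserver: up")
        return "http://127.0.0.1:8080", func() { fmt.Println("apiserver: down") }
    }

    func startScheduler() ShutdownFunc {
        fmt.Println("scheduler: up")
        return func() { fmt.Println("scheduler: down") }
    }

    func main() {
        apiURL, apiShutdown := startAPIServer()
        fmt.Println("scheduling against", apiURL)
        schedShutdown := startScheduler()

        // Compose teardown in reverse start order: stop the scheduler
        // first, then the API server it talks to, the same ordering used
        // by the shutdownFunc returned from mustSetupScheduler above.
        shutdown := func() {
            schedShutdown()
            apiShutdown()
        }
        defer shutdown()
    }

The client rate-limiter settings (QPS and Burst of 5000) stay local to the benchmark, so client-side throttling does not distort the measured scheduling throughput.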