vendor update for CSI 0.3.0

gman
2018-07-18 16:47:22 +02:00
parent 6f484f92fc
commit 8ea659f0d5
6810 changed files with 438061 additions and 193861 deletions


@@ -0,0 +1,70 @@
package(default_visibility = ["//visibility:public"])
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_test(
name = "go_default_test",
size = "large",
srcs = [
"ipam_test.go",
"main_test.go",
],
embed = [":go_default_library"],
tags = ["integration"],
deps = [
"//pkg/controller/nodeipam:go_default_library",
"//pkg/controller/nodeipam/ipam:go_default_library",
"//test/integration/framework:go_default_library",
"//test/integration/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_library(
name = "go_default_library",
srcs = [
"cloud.go",
"results.go",
"util.go",
],
importpath = "k8s.io/kubernetes/test/integration/ipamperf",
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/gce/cloud:go_default_library",
"//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library",
"//pkg/controller/nodeipam/ipam:go_default_library",
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
"//pkg/controller/util/node:go_default_library",
"//test/integration/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)


@@ -0,0 +1,85 @@
IPAM Performance Test
=====
Motivation
-----
We wanted to be able to test the behavior of the IPAM controller under various scenarios,
by mocking and monitoring the edges that the controller interacts with. This has the following goals:
- Save time on testing
- Simulate various behaviors cheaply
- Observe and model the ideal behavior of the IPAM controller code
Currently the test runs through the 4 different IPAM controller modes, for cases where the kube API QPS is a)
equal to and b) significantly less than the number of nodes being added, to observe and quantify behavior.
How to run
-------
```shell
# In kubernetes root path
make generated_files
cd test/integration/ipamperf
./test-performance.sh
```
The runner scripts support a few different options:
```shell
./test-performance.sh -h
usage: ./test-performance.sh <options>
-h display this help message
-d enable debug logs in tests
-r <pattern> regex pattern to match for tests
-o <filename> file to write JSON formatted results to
-p <id> enable cpu and memory profiles, output written to mem-<id>.out and cpu-<id>.out
-c enable custom test configuration
-a <name> allocator name, one of RangeAllocator, CloudAllocator, IPAMFromCluster, IPAMFromCloud
-k <num> api server qps for allocator
-n <num> number of nodes to simulate
-m <num> api server qps for node creation
-l <num> gce cloud endpoint qps
```
The tests follow the pattern TestPerformance/{AllocatorType}-KubeQPS{X}-Nodes{Y}, where AllocatorType
is one of
- RangeAllocator
- IPAMFromCluster
- CloudAllocator
- IPAMFromCloud
and X represents the QPS configured for the kubernetes API client, and Y is the number of nodes to create.
The -d flag sets the -v level for glog to 6, enabling nearly all of the debug logs in the code.
So to run the test for CloudAllocator with 10 nodes, one can run
```shell
./test-performance.sh -r /CloudAllocator.*Nodes10$
```
At the end of the run, the results for all executed tests are printed in JSON format. Passing the -o option
also saves this JSON to a named file.
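For example, to run every test with 100 nodes and also save the results:
```shell
./test-performance.sh -r 'Nodes100$' -o results.json
```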
### Profiling the code
It's possible to get the CPU and memory profiles of the code during test execution by using the ```-p``` option.
The CPU and memory profiles are generated in the same directory, with the file names set to ```cpu-<id>.out```
and ```mem-<id>.out```, where ```<id>``` is the argument value. The typical pattern is to use the number
of nodes being simulated as the id, or 'all' when running the full suite.
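For example, a run that profiles the full suite and is then inspected with the standard pprof tool might look like:
```shell
./test-performance.sh -p all
go tool pprof cpu-all.out
```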
### Custom Test Configuration
It's also possible to run a custom test configuration by passing the -c option. With this option, it is then
possible to specify the number of nodes to simulate and the API server QPS values for node creation,
IPAM allocation and the cloud endpoint, along with the allocator name to run. The default values for the
QPS parameters are 30 for IPAM allocation, 100 for node creation and 30 for the cloud endpoint, and the
default allocator is the RangeAllocator.
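For example, a custom run simulating 500 nodes with the IPAMFromCluster allocator at 50 allocation QPS could be:
```shell
./test-performance.sh -c -a IPAMFromCluster -n 500 -k 50
```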
Code Organization
-----
The core of the tests is defined in [ipam_test.go](ipam_test.go), using the t.Run() helper to control parallelism,
as we want to be able to start the master only once. [cloud.go](cloud.go) contains the mock of the cloud server endpoint
and can be configured to behave differently as needed by the various modes. The tracking of the node behavior and
creation of the test results data is in [results.go](results.go).


@@ -0,0 +1,154 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipamperf
import (
"context"
"net"
"sync"
beta "google.golang.org/api/compute/v0.beta"
ga "google.golang.org/api/compute/v1"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
"k8s.io/kubernetes/test/integration/util"
)
// implementation note:
// ------------------
// cloud.go implements hooks and handler functions for the MockGCE cloud in order to meet expectations
// of cloud behavior from the IPAM controllers. The key constraint is that the IPAM code is spread
// across both GA and Beta instances, which are distinct objects in the mock. We need to solve for
//
// 1. When a GET is called on an instance, we lazy create the instance with or without an assigned
// ip alias as needed by the IPAM controller type
// 2. When we assign an IP alias for an instance, both the GA and Beta instance have to agree on the
// assigned alias range
//
// We solve both problems by using a baseInstanceList which maintains a list of known instances
// and their pre-assigned ip-alias ranges (if needed). We then create GetHooks for the GA and Beta
// GetInstance calls as closures over this baseInstanceList, so they can look up base instance data.
//
// This has the advantage that once the Get hook populates the GCE mock with the base data, the
// rest of the mock code runs as is.
// baseInstance tracks basic instance data needed by the IPAM controllers
type baseInstance struct {
name string
zone string
aliasRange string
}
// baseInstanceList tracks a set of base instances
type baseInstanceList struct {
allocateCIDR bool
clusterCIDR *net.IPNet
subnetMaskSize int
cidrSet *cidrset.CidrSet
lock sync.Mutex // protect access to instances
instances map[meta.Key]*baseInstance
}
// toGA is a utility method to return the baseInstance data as a GA Instance object
func (bi *baseInstance) toGA() *ga.Instance {
inst := &ga.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*ga.NetworkInterface{{}}}
if bi.aliasRange != "" {
inst.NetworkInterfaces[0].AliasIpRanges = []*ga.AliasIpRange{
{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},
}
}
return inst
}
// toBeta is a utility method to return the baseInstance data as a beta Instance object
func (bi *baseInstance) toBeta() *beta.Instance {
inst := &beta.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*beta.NetworkInterface{{}}}
if bi.aliasRange != "" {
inst.NetworkInterfaces[0].AliasIpRanges = []*beta.AliasIpRange{
{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},
}
}
return inst
}
// newBaseInstanceList is the baseInstanceList constructor
func newBaseInstanceList(allocateCIDR bool, clusterCIDR *net.IPNet, subnetMaskSize int) *baseInstanceList {
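// the error from NewCIDRSet is ignored here: the tests always pass a valid clusterCIDR and subnetMaskSize pair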
cidrSet, _ := cidrset.NewCIDRSet(clusterCIDR, subnetMaskSize)
return &baseInstanceList{
allocateCIDR: allocateCIDR,
clusterCIDR: clusterCIDR,
subnetMaskSize: subnetMaskSize,
cidrSet: cidrSet,
instances: make(map[meta.Key]*baseInstance),
}
}
// getOrCreateBaseInstance lazily creates a new base instance, assigning an alias range if allocateCIDR is true
func (bil *baseInstanceList) getOrCreateBaseInstance(key *meta.Key) *baseInstance {
bil.lock.Lock()
defer bil.lock.Unlock()
inst, found := bil.instances[*key]
if !found {
inst = &baseInstance{name: key.Name, zone: key.Zone}
if bil.allocateCIDR {
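// the error from AllocateNext is ignored: the test cluster CIDR provides far more /24 ranges (8K) than nodes simulated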
nextRange, _ := bil.cidrSet.AllocateNext()
inst.aliasRange = nextRange.String()
}
bil.instances[*key] = inst
}
return inst
}
// newGAGetHook creates a new closure with the current baseInstanceList to be used as a MockInstances.GetHook
func (bil *baseInstanceList) newGAGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {
return func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {
m.Lock.Lock()
defer m.Lock.Unlock()
if _, found := m.Objects[*key]; !found {
m.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toGA()}
}
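// return false so the mock falls through to its default Get logic, which now finds the object populated above; the beta hook below works the same way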
return false, nil, nil
}
}
// newBetaGetHook creates a new closure with the current baseInstanceList to be used as a MockBetaInstances.GetHook
func (bil *baseInstanceList) newBetaGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {
return func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {
m.Lock.Lock()
defer m.Lock.Unlock()
if _, found := m.Objects[*key]; !found {
m.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toBeta()}
}
return false, nil, nil
}
}
// newMockCloud returns a mock GCE cloud with the appropriate handler hooks
func (bil *baseInstanceList) newMockCloud() cloud.Cloud {
c := cloud.NewMockGCE(nil)
// insert hooks to lazily create an instance when needed
c.MockInstances.GetHook = bil.newGAGetHook()
c.MockBetaInstances.GetHook = bil.newBetaGetHook()
return c
}


@@ -0,0 +1,158 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipamperf
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"testing"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/controller/nodeipam"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
"k8s.io/kubernetes/test/integration/util"
)
func setupAllocator(apiURL string, config *Config, clusterCIDR, serviceCIDR *net.IPNet, subnetMaskSize int) (*clientset.Clientset, util.ShutdownFunc, error) {
controllerStopChan := make(chan struct{})
shutdownFunc := func() {
close(controllerStopChan)
}
clientSet := clientset.NewForConfigOrDie(&restclient.Config{
Host: apiURL,
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}},
QPS: float32(config.KubeQPS),
Burst: config.KubeQPS,
})
sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour)
ipamController, err := nodeipam.NewNodeIpamController(
sharedInformer.Core().V1().Nodes(), config.Cloud, clientSet,
clusterCIDR, serviceCIDR, subnetMaskSize, config.AllocatorType,
)
if err != nil {
return nil, shutdownFunc, err
}
go ipamController.Run(controllerStopChan)
sharedInformer.Start(controllerStopChan)
return clientSet, shutdownFunc, nil
}
func runTest(t *testing.T, apiURL string, config *Config, clusterCIDR, serviceCIDR *net.IPNet, subnetMaskSize int) (*Results, error) {
t.Helper()
glog.Infof("Running test %s", t.Name())
defer deleteNodes(apiURL, config) // clean up nodes after the controller has shut down
clientSet, shutdownFunc, err := setupAllocator(apiURL, config, clusterCIDR, serviceCIDR, subnetMaskSize)
if err != nil {
t.Fatalf("Error starting IPAM allocator: %v", err)
}
defer shutdownFunc()
o := NewObserver(clientSet, config.NumNodes)
if err := o.StartObserving(); err != nil {
t.Fatalf("Could not start test observer: %v", err)
}
if err := createNodes(apiURL, config); err != nil {
t.Fatalf("Could not create nodes: %v", err)
}
results := o.Results(t.Name(), config)
glog.Infof("Results: %s", results)
if !results.Succeeded {
t.Errorf("%s: Not allocations succeeded", t.Name())
}
return results, nil
}
func logResults(allResults []*Results) {
jStr, err := json.MarshalIndent(allResults, "", " ")
if err != nil {
glog.Errorf("Error formating results: %v", err)
return
}
if resultsLogFile != "" {
glog.Infof("Logging results to %s", resultsLogFile)
if err := ioutil.WriteFile(resultsLogFile, jStr, os.FileMode(0644)); err != nil {
glog.Errorf("Error logging results to %s: %v", resultsLogFile, err)
}
}
glog.Infof("AllResults:\n%s", string(jStr))
}
func TestPerformance(t *testing.T) {
if testing.Short() {
// TODO (#61854) find why flakiness is caused by etcd connectivity before enabling always
t.Skip("Skipping because we want to run short tests")
}
apiURL, masterShutdown := util.StartApiserver()
defer masterShutdown()
_, clusterCIDR, _ := net.ParseCIDR("10.96.0.0/11") // allows up to 8K nodes
_, serviceCIDR, _ := net.ParseCIDR("10.94.0.0/24") // does not matter for the test - allows up to ~250 services
subnetMaskSize := 24
var (
allResults []*Results
tests []*Config
)
if isCustom {
tests = append(tests, customConfig)
} else {
for _, numNodes := range []int{10, 100} {
for _, alloc := range []ipam.CIDRAllocatorType{ipam.RangeAllocatorType, ipam.CloudAllocatorType, ipam.IPAMFromClusterAllocatorType, ipam.IPAMFromCloudAllocatorType} {
tests = append(tests, &Config{AllocatorType: alloc, NumNodes: numNodes, CreateQPS: numNodes, KubeQPS: 10, CloudQPS: 10})
}
}
}
for _, test := range tests {
testName := fmt.Sprintf("%s-KubeQPS%d-Nodes%d", test.AllocatorType, test.KubeQPS, test.NumNodes)
t.Run(testName, func(t *testing.T) {
allocateCIDR := false
if test.AllocatorType == ipam.IPAMFromCloudAllocatorType || test.AllocatorType == ipam.CloudAllocatorType {
allocateCIDR = true
}
bil := newBaseInstanceList(allocateCIDR, clusterCIDR, subnetMaskSize)
cloud, err := util.NewMockGCECloud(bil.newMockCloud())
if err != nil {
t.Fatalf("Unable to create mock cloud: %v", err)
}
test.Cloud = cloud
if results, err := runTest(t, apiURL, test, clusterCIDR, serviceCIDR, subnetMaskSize); err == nil {
allResults = append(allResults, results)
}
})
}
logResults(allResults)
}


@@ -0,0 +1,66 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipamperf
import (
"flag"
"testing"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
"k8s.io/kubernetes/test/integration/framework"
)
var (
resultsLogFile string
isCustom bool
customConfig = &Config{
NumNodes: 10,
KubeQPS: 30,
CloudQPS: 30,
CreateQPS: 100,
AllocatorType: ipam.RangeAllocatorType,
}
)
func TestMain(m *testing.M) {
allocator := string(ipam.RangeAllocatorType)
flag.StringVar(&resultsLogFile, "log", "", "log file to write JSON results to")
flag.BoolVar(&isCustom, "custom", false, "enable custom test configuration")
flag.StringVar(&allocator, "allocator", allocator, "allocator to use")
flag.IntVar(&customConfig.KubeQPS, "kube-qps", customConfig.KubeQPS, "API server qps for allocations")
flag.IntVar(&customConfig.NumNodes, "num-nodes", 10, "number of nodes to simulate")
flag.IntVar(&customConfig.CreateQPS, "create-qps", customConfig.CreateQPS, "API server qps for node creation")
flag.IntVar(&customConfig.CloudQPS, "cloud-qps", customConfig.CloudQPS, "GCE Cloud qps limit")
flag.Parse()
switch allocator {
case string(ipam.RangeAllocatorType):
customConfig.AllocatorType = ipam.RangeAllocatorType
case string(ipam.CloudAllocatorType):
customConfig.AllocatorType = ipam.CloudAllocatorType
case string(ipam.IPAMFromCloudAllocatorType):
customConfig.AllocatorType = ipam.IPAMFromCloudAllocatorType
case string(ipam.IPAMFromClusterAllocatorType):
customConfig.AllocatorType = ipam.IPAMFromClusterAllocatorType
default:
glog.Fatalf("Unknown allocator type: %s", allocator)
}
framework.EtcdMain(m.Run)
}


@@ -0,0 +1,221 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipamperf
import (
"bytes"
"fmt"
"sort"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
)
// Config represents the test configuration that is being run
type Config struct {
CreateQPS int // rate at which nodes are created
KubeQPS int // rate for communication with kubernetes API
CloudQPS int // rate for communication with cloud endpoint
NumNodes int // number of nodes to create and monitor
AllocatorType ipam.CIDRAllocatorType // type of allocator to run
Cloud cloudprovider.Interface // cloud provider
}
type nodeTime struct {
added time.Time // observed time for when node was added
allocated time.Time // observed time for when node was assigned podCIDR
podCIDR string // the allocated podCIDR range
}
// Observer represents the handle to test observer that watches for node changes
// and tracks behavior
type Observer struct {
numAdded int // number of nodes observed added
numAllocated int // number of nodes observed allocated podCIDR
timing map[string]*nodeTime // per node timing
numNodes int // the number of nodes to expect
stopChan chan struct{} // for the shared informer
wg sync.WaitGroup
clientSet *clientset.Clientset
}
// JSONDuration is an alias of time.Duration to support custom Marshal code
type JSONDuration time.Duration
// NodeDuration represents the CIDR allocation time for each node
type NodeDuration struct {
Name string // node name
PodCIDR string // the podCIDR that was assigned to the node
Duration JSONDuration // how long it took to assign podCIDR
}
// Results represents the observed test results.
type Results struct {
Name string // name for the test
Config *Config // handle to the test config
Succeeded bool // whether all nodes were assigned podCIDR
MaxAllocTime JSONDuration // the maximum time taken for assignment per node
TotalAllocTime JSONDuration // duration between first addition and last assignment
NodeAllocTime []NodeDuration // assignment time by node name
}
// NewObserver creates a new observer given a handle to the Clientset
func NewObserver(clientSet *clientset.Clientset, numNodes int) *Observer {
o := &Observer{
timing: map[string]*nodeTime{},
numNodes: numNodes,
clientSet: clientSet,
stopChan: make(chan struct{}),
}
return o
}
// StartObserving starts an asynchronous loop to monitor for node changes.
// Call Results() to get the test results after starting observer.
func (o *Observer) StartObserving() error {
o.monitor()
glog.Infof("Test observer started")
return nil
}
// Results returns the test results. It waits for the observer to finish
// and returns the computed results of the observations.
func (o *Observer) Results(name string, config *Config) *Results {
var (
firstAdd time.Time // earliest time any node was added (first node add)
lastAssignment time.Time // latest time any node was assigned a CIDR (last node assignment)
)
o.wg.Wait()
close(o.stopChan) // shutdown the shared informer
results := &Results{
Name: name,
Config: config,
Succeeded: o.numAdded == o.numNodes && o.numAllocated == o.numNodes,
MaxAllocTime: 0,
NodeAllocTime: []NodeDuration{},
}
for name, nTime := range o.timing {
addFound := !nTime.added.IsZero()
if addFound && (firstAdd.IsZero() || nTime.added.Before(firstAdd)) {
firstAdd = nTime.added
}
cidrFound := !nTime.allocated.IsZero()
if cidrFound && nTime.allocated.After(lastAssignment) {
lastAssignment = nTime.allocated
}
if addFound && cidrFound {
allocTime := nTime.allocated.Sub(nTime.added)
if allocTime > time.Duration(results.MaxAllocTime) {
results.MaxAllocTime = JSONDuration(allocTime)
}
results.NodeAllocTime = append(results.NodeAllocTime, NodeDuration{
Name: name, PodCIDR: nTime.podCIDR, Duration: JSONDuration(allocTime),
})
}
}
results.TotalAllocTime = JSONDuration(lastAssignment.Sub(firstAdd))
sort.Slice(results.NodeAllocTime, func(i, j int) bool {
return results.NodeAllocTime[i].Duration > results.NodeAllocTime[j].Duration
})
return results
}
func (o *Observer) monitor() {
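// Results() blocks on this WaitGroup; the matching Done is called exactly once, from the update handler below, when the last node has been assigned a podCIDR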
o.wg.Add(1)
sharedInformer := informers.NewSharedInformerFactory(o.clientSet, 1*time.Second)
nodeInformer := sharedInformer.Core().V1().Nodes().Informer()
nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: nodeutil.CreateAddNodeHandler(func(node *v1.Node) (err error) {
name := node.GetName()
if node.Spec.PodCIDR != "" {
// ignore nodes that already have a PodCIDR (might be holdovers from previous runs that did not get cleaned up)
return
}
nTime := &nodeTime{}
o.timing[name] = nTime
nTime.added = time.Now()
o.numAdded = o.numAdded + 1
return
}),
UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(oldNode, newNode *v1.Node) (err error) {
name := newNode.GetName()
nTime, found := o.timing[name]
if !found {
return // consistency check - ignore nodes we have not seen the add event for
}
// check if CIDR assigned and ignore redundant updates
if newNode.Spec.PodCIDR != "" && nTime.podCIDR == "" {
nTime.allocated = time.Now()
nTime.podCIDR = newNode.Spec.PodCIDR
o.numAllocated++
if o.numAllocated%10 == 0 {
glog.Infof("progress: %d/%d - %.2d%%", o.numAllocated, o.numNodes, (o.numAllocated * 100.0 / o.numNodes))
}
// do the following check only when numAllocated is modified; otherwise redundant updates
// could cause wg.Done() to be called multiple times, causing a panic
if o.numAdded == o.numNodes && o.numAllocated == o.numNodes {
glog.Info("All nodes assigned podCIDR")
o.wg.Done()
}
}
return
}),
})
sharedInformer.Start(o.stopChan)
}
// String implements the Stringer interface and returns a multi-line representation
// of the test results.
func (results *Results) String() string {
var b bytes.Buffer
fmt.Fprintf(&b, "\n TestName: %s", results.Name)
fmt.Fprintf(&b, "\n NumNodes: %d, CreateQPS: %d, KubeQPS: %d, CloudQPS: %d, Allocator: %v",
results.Config.NumNodes, results.Config.CreateQPS, results.Config.KubeQPS,
results.Config.CloudQPS, results.Config.AllocatorType)
fmt.Fprintf(&b, "\n Succeeded: %v, TotalAllocTime: %v, MaxAllocTime: %v",
results.Succeeded, time.Duration(results.TotalAllocTime), time.Duration(results.MaxAllocTime))
fmt.Fprintf(&b, "\n %5s %-20s %-20s %s", "Num", "Node", "PodCIDR", "Duration (s)")
for i, d := range results.NodeAllocTime {
fmt.Fprintf(&b, "\n %5d %-20s %-20s %10.3f", i+1, d.Name, d.PodCIDR, time.Duration(d.Duration).Seconds())
}
return b.String()
}
// MarshalJSON implements the json.Marshaler interface
func (jDuration *JSONDuration) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("\"%s\"", time.Duration(*jDuration).String())), nil
}
// UnmarshalJSON implements the json.Unmarshaler interface
func (jDuration *JSONDuration) UnmarshalJSON(b []byte) (err error) {
var d time.Duration
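// b[1 : len(b)-1] strips the surrounding quotes that MarshalJSON wrote before parsing the duration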
if d, err = time.ParseDuration(string(b[1 : len(b)-1])); err == nil {
*jDuration = JSONDuration(d)
}
return
}


@@ -0,0 +1,91 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
TEST_ARGS=""
RUN_PATTERN=".*"
PROFILE_OPTS=""
function usage() {
echo "usage: $0 <options>"
echo " -h display this help message"
echo " -d enable debug logs in tests"
echo " -r <pattern> regex pattern to match for tests"
echo " -o <filename> file to write JSON formatted results to"
echo " -p <id> enable cpu and memory profiles, output written to mem-<id>.out and cpu-<id>.out"
echo " -c enable custom test configuration"
echo " -a <name> allocator name, one of RangeAllocator, CloudAllocator, IPAMFromCluster, IPAMFromCloud"
echo " -k <num> api server qps for allocator"
echo " -n <num> number of nodes to simulate"
echo " -m <num> api server qps for node creation"
echo " -l <num> gce cloud endpoint qps"
exit 1
}
while getopts ":hdr:o:p:ca:k:n:m:l:" opt; do
case ${opt} in
d) TEST_ARGS="${TEST_ARGS} -v=6"
;;
r) RUN_PATTERN="${OPTARG}"
;;
o) TEST_ARGS="${TEST_ARGS} -log ${OPTARG}"
;;
p) PROFILE_OPTS="-memprofile mem-${OPTARG}.out -cpuprofile cpu-${OPTARG}.out"
;;
c) TEST_ARGS="${TEST_ARGS} -custom"
;;
a) TEST_ARGS="${TEST_ARGS} -allocator ${OPTARG}"
;;
k) TEST_ARGS="${TEST_ARGS} -kube-qps ${OPTARG}"
;;
n) TEST_ARGS="${TEST_ARGS} -num-nodes ${OPTARG}"
;;
m) TEST_ARGS="${TEST_ARGS} -create-qps ${OPTARG}"
;;
l) TEST_ARGS="${TEST_ARGS} -cloud-qps ${OPTARG}"
;;
h) usage
;;
\?) usage
;;
esac
done
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../../
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
DIR_BASENAME=$(dirname "${BASH_SOURCE}")
pushd ${DIR_BASENAME}
cleanup() {
popd 2> /dev/null
kube::etcd::cleanup
kube::log::status "performance test cleanup complete"
}
trap cleanup EXIT
kube::etcd::start
# Running IPAM tests. It might take a long time.
kube::log::status "performance test (IPAM) start"
go test ${PROFILE_OPTS} -test.run=${RUN_PATTERN} -test.timeout=60m -test.short=false -v -args ${TEST_ARGS}
kube::log::status "... IPAM tests finished."


@@ -0,0 +1,95 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipamperf
import (
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
)
const (
maxCreateRetries = 10
retryDelay = 10 * time.Second
)
var (
baseNodeTemplate = &v1.Node{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sample-node-",
},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("32Gi"),
},
Phase: v1.NodeRunning,
Conditions: []v1.NodeCondition{
{Type: v1.NodeReady, Status: v1.ConditionTrue},
},
},
}
)
func deleteNodes(apiURL string, config *Config) {
glog.Info("Deleting nodes")
clientSet := clientset.NewForConfigOrDie(&restclient.Config{
Host: apiURL,
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}},
QPS: float32(config.CreateQPS),
Burst: config.CreateQPS,
})
noGrace := int64(0)
if err := clientSet.CoreV1().Nodes().DeleteCollection(&metav1.DeleteOptions{GracePeriodSeconds: &noGrace}, metav1.ListOptions{}); err != nil {
glog.Errorf("Error deleting node: %v", err)
}
}
func createNodes(apiURL string, config *Config) error {
clientSet := clientset.NewForConfigOrDie(&restclient.Config{
Host: apiURL,
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}},
QPS: float32(config.CreateQPS),
Burst: config.CreateQPS,
})
glog.Infof("Creating %d nodes", config.NumNodes)
for i := 0; i < config.NumNodes; i++ {
var err error
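// retry on server timeouts, which can occur when CreateQPS pushes the API server to its limits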
for j := 0; j < maxCreateRetries; j++ {
if _, err = clientSet.CoreV1().Nodes().Create(baseNodeTemplate); err != nil && errors.IsServerTimeout(err) {
glog.Infof("Server timeout creating nodes, retrying after %v", retryDelay)
time.Sleep(retryDelay)
continue
}
break
}
if err != nil {
glog.Errorf("Error creating nodes: %v", err)
return err
}
}
glog.Infof("%d nodes created", config.NumNodes)
return nil
}