vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

vendor/k8s.io/kubernetes/test/e2e/autoscaling/BUILD generated vendored Normal file

@@ -0,0 +1,64 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"autoscaling_timer.go",
"cluster_autoscaler_scalability.go",
"cluster_size_autoscaling.go",
"custom_metrics_autoscaling.go",
"dns_autoscaling.go",
"framework.go",
"horizontal_pod_autoscaling.go",
],
importpath = "k8s.io/kubernetes/test/e2e/autoscaling",
deps = [
"//pkg/apis/core:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/instrumentation/monitoring:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/golang.org/x/oauth2/google:go_default_library",
"//vendor/google.golang.org/api/monitoring/v3:go_default_library",
"//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/test/e2e/autoscaling/OWNERS generated vendored Normal file

@@ -0,0 +1,14 @@
reviewers:
- aleksandra-malinowska
- bskiba
- jszczepkowski
- MaciekPytel
- mwielgus
- wasylkowski
approvers:
- aleksandra-malinowska
- bskiba
- jszczepkowski
- MaciekPytel
- mwielgus
- wasylkowski

vendor/k8s.io/kubernetes/test/e2e/autoscaling/autoscaling_timer.go generated vendored Normal file

@@ -0,0 +1,115 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"strings"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling", func() {
f := framework.NewDefaultFramework("autoscaling")
SIGDescribe("Autoscaling a service", func() {
BeforeEach(func() {
// Check if the Cluster Autoscaler is enabled by trying to get its ConfigMap.
_, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
if err != nil {
framework.Skipf("test expects Cluster Autoscaler to be enabled")
}
})
Context("from 1 pod and 3 nodes to 8 pods and >=4 nodes", func() {
const nodesNum = 3 // Expect there to be 3 nodes before and after the test.
var nodeGroupName string // Set by BeforeEach, used by AfterEach to scale this node group down after the test.
var nodes *v1.NodeList // Set by BeforeEach, used by Measure to calculate CPU request based on node size.
BeforeEach(func() {
// Make sure there is only 1 node group, otherwise this test becomes useless.
nodeGroups := strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",")
if len(nodeGroups) != 1 {
framework.Skipf("test expects 1 node group, found %d", len(nodeGroups))
}
nodeGroupName = nodeGroups[0]
// Make sure the node group has exactly 'nodesNum' nodes, otherwise this test becomes useless.
nodeGroupSize, err := framework.GroupSize(nodeGroupName)
framework.ExpectNoError(err)
if nodeGroupSize != nodesNum {
framework.Skipf("test expects %d nodes, found %d", nodesNum, nodeGroupSize)
}
// Make sure all nodes are schedulable, otherwise we are in some kind of a problem state.
nodes = framework.GetReadySchedulableNodesOrDie(f.ClientSet)
schedulableCount := len(nodes.Items)
Expect(schedulableCount).To(Equal(nodeGroupSize), "not all nodes are schedulable")
})
AfterEach(func() {
// Attempt cleanup only if a node group was targeted for scale up.
// Otherwise the test was probably skipped and we'll get a gcloud error due to invalid parameters.
if len(nodeGroupName) > 0 {
// Scale down back to only 'nodesNum' nodes, as expected at the start of the test.
framework.ExpectNoError(framework.ResizeGroup(nodeGroupName, nodesNum))
framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, nodesNum, 15*time.Minute))
}
})
Measure("takes less than 15 minutes", func(b Benchmarker) {
// Measured over multiple samples, scaling takes 10 +/- 2 minutes, so 15 minutes should be fully sufficient.
const timeToWait = 15 * time.Minute
// Calculate the CPU request of the service.
// This test expects that 8 pods will not fit in 'nodesNum' nodes, but will fit in >='nodesNum'+1 nodes.
// Make it so that 'nodesNum' pods fit perfectly per node.
nodeCpus := nodes.Items[0].Status.Allocatable[v1.ResourceCPU]
nodeCpuMillis := (&nodeCpus).MilliValue()
cpuRequestMillis := int64(nodeCpuMillis / nodesNum)
// Start the service we want to scale and wait for it to be up and running.
nodeMemoryBytes := nodes.Items[0].Status.Allocatable[v1.ResourceMemory]
nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024
memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory.
replicas := 1
resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.InternalClientset)
defer resourceConsumer.CleanUp()
resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough.
// Enable Horizontal Pod Autoscaler with 50% target utilization and
// scale up the CPU usage to trigger autoscaling to 8 pods for target to be satisfied.
targetCpuUtilizationPercent := int32(50)
hpa := common.CreateCPUHorizontalPodAutoscaler(resourceConsumer, targetCpuUtilizationPercent, 1, 10)
defer common.DeleteHorizontalPodAutoscaler(resourceConsumer, hpa.Name)
cpuLoad := 8 * cpuRequestMillis * int64(targetCpuUtilizationPercent) / 100 // 8 pods utilized to the target level
resourceConsumer.ConsumeCPU(int(cpuLoad))
// Measure the time it takes for the service to scale to 8 pods with 50% CPU utilization each.
b.Time("total scale-up time", func() {
resourceConsumer.WaitForReplicas(8, timeToWait)
})
}, 1) // Increase to run the test more than once.
})
})
})
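
To make the load arithmetic above concrete, here is a minimal standalone sketch of the replica count the HPA should settle on, using the usual HPA rule desired = ceil(totalUsage / (target% * perPodRequest)). The 2000m node size is a hypothetical value; only the derivation of cpuRequestMillis and cpuLoad mirrors the test.

package main

import (
	"fmt"
	"math"
)

func main() {
	// Hypothetical node with 2000 allocatable millicores.
	nodeCpuMillis := int64(2000)
	nodesNum := int64(3)

	// Same derivation as the test: each pod requests roughly a third of a node.
	cpuRequestMillis := nodeCpuMillis / nodesNum // 666m
	targetUtilizationPercent := int64(50)

	// Load injected by ConsumeCPU: enough for 8 pods running at the target level.
	cpuLoad := 8 * cpuRequestMillis * targetUtilizationPercent / 100 // 2664m in total

	// Usual HPA rule: desired = ceil(totalUsage / (target% * perPodRequest)).
	desired := math.Ceil(float64(cpuLoad) / (float64(targetUtilizationPercent) / 100 * float64(cpuRequestMillis)))
	fmt.Printf("per-pod request: %dm, injected load: %dm, desired replicas: %.0f\n",
		cpuRequestMillis, cpuLoad, desired)
	// Prints a desired replica count of 8, which is what WaitForReplicas(8, timeToWait) asserts.
}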

vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_autoscaler_scalability.go generated vendored Normal file

@@ -0,0 +1,550 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"encoding/json"
"fmt"
"math"
"strings"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
memoryReservationTimeout = 5 * time.Minute
largeResizeTimeout = 8 * time.Minute
largeScaleUpTimeout = 10 * time.Minute
largeScaleDownTimeout = 20 * time.Minute
minute = 1 * time.Minute
maxNodes = 1000
)
type clusterPredicates struct {
nodes int
}
type scaleUpTestConfig struct {
initialNodes int
initialPods int
extraPods *testutils.RCConfig
expectedResult *clusterPredicates
}
var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", func() {
f := framework.NewDefaultFramework("autoscaling")
var c clientset.Interface
var nodeCount int
var coresPerNode int
var memCapacityMb int
var originalSizes map[string]int
var sum int
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke", "kubemark")
// Check if the Cluster Autoscaler is enabled by trying to get its ConfigMap.
_, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
if err != nil {
framework.Skipf("test expects Cluster Autoscaler to be enabled")
}
c = f.ClientSet
if originalSizes == nil {
originalSizes = make(map[string]int)
sum = 0
for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
size, err := framework.GroupSize(mig)
framework.ExpectNoError(err)
By(fmt.Sprintf("Initial size of %s: %d", mig, size))
originalSizes[mig] = size
sum += size
}
}
framework.ExpectNoError(framework.WaitForReadyNodes(c, sum, scaleUpTimeout))
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
mem := nodes.Items[0].Status.Capacity[v1.ResourceMemory]
coresPerNode = int((&cpu).MilliValue() / 1000)
memCapacityMb = int((&mem).Value() / 1024 / 1024)
Expect(nodeCount).Should(Equal(sum))
if framework.ProviderIs("gke") {
val, err := isAutoscalerEnabled(3)
framework.ExpectNoError(err)
if !val {
err = enableAutoscaler("default-pool", 3, 5)
framework.ExpectNoError(err)
}
}
})
AfterEach(func() {
By(fmt.Sprintf("Restoring initial size of the cluster"))
setMigSizes(originalSizes)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount, scaleDownTimeout))
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
s := time.Now()
makeSchedulableLoop:
for start := time.Now(); time.Since(start) < makeSchedulableTimeout; time.Sleep(makeSchedulableDelay) {
for _, n := range nodes.Items {
err = makeNodeSchedulable(c, &n, true)
switch err.(type) {
case CriticalAddonsOnlyError:
continue makeSchedulableLoop
default:
framework.ExpectNoError(err)
}
}
break
}
glog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
})
It("should scale up at all [Feature:ClusterAutoscalerScalability1]", func() {
perNodeReservation := int(float64(memCapacityMb) * 0.95)
replicasPerNode := 10
additionalNodes := maxNodes - nodeCount
replicas := additionalNodes * replicasPerNode
additionalReservation := additionalNodes * perNodeReservation
// saturate cluster
reservationCleanup := ReserveMemory(f, "some-pod", nodeCount*2, nodeCount*perNodeReservation, true, memoryReservationTimeout)
defer reservationCleanup()
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
// configure pending pods & expected scale up
rcConfig := reserveMemoryRCConfig(f, "extra-pod-1", replicas, additionalReservation, largeScaleUpTimeout)
expectedResult := createClusterPredicates(nodeCount + additionalNodes)
config := createScaleUpTestConfig(nodeCount, nodeCount, rcConfig, expectedResult)
// run test
testCleanup := simpleScaleUpTest(f, config)
defer testCleanup()
})
It("should scale up twice [Feature:ClusterAutoscalerScalability2]", func() {
perNodeReservation := int(float64(memCapacityMb) * 0.95)
replicasPerNode := 10
additionalNodes1 := int(math.Ceil(0.7 * maxNodes))
additionalNodes2 := int(math.Ceil(0.25 * maxNodes))
if additionalNodes1+additionalNodes2 > maxNodes {
additionalNodes2 = maxNodes - additionalNodes1
}
replicas1 := additionalNodes1 * replicasPerNode
replicas2 := additionalNodes2 * replicasPerNode
glog.Infof("cores per node: %v", coresPerNode)
// saturate cluster
initialReplicas := nodeCount
reservationCleanup := ReserveMemory(f, "some-pod", initialReplicas, nodeCount*perNodeReservation, true, memoryReservationTimeout)
defer reservationCleanup()
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
glog.Infof("Reserved successfully")
// configure pending pods & expected scale up #1
rcConfig := reserveMemoryRCConfig(f, "extra-pod-1", replicas1, additionalNodes1*perNodeReservation, largeScaleUpTimeout)
expectedResult := createClusterPredicates(nodeCount + additionalNodes1)
config := createScaleUpTestConfig(nodeCount, nodeCount, rcConfig, expectedResult)
// run test #1
tolerateUnreadyNodes := additionalNodes1 / 20
tolerateUnreadyPods := (initialReplicas + replicas1) / 20
testCleanup1 := simpleScaleUpTestWithTolerance(f, config, tolerateUnreadyNodes, tolerateUnreadyPods)
defer testCleanup1()
glog.Infof("Scaled up once")
// configure pending pods & expected scale up #2
rcConfig2 := reserveMemoryRCConfig(f, "extra-pod-2", replicas2, additionalNodes2*perNodeReservation, largeScaleUpTimeout)
expectedResult2 := createClusterPredicates(nodeCount + additionalNodes1 + additionalNodes2)
config2 := createScaleUpTestConfig(nodeCount+additionalNodes1, nodeCount+additionalNodes2, rcConfig2, expectedResult2)
// run test #2
tolerateUnreadyNodes = maxNodes / 20
tolerateUnreadyPods = (initialReplicas + replicas1 + replicas2) / 20
testCleanup2 := simpleScaleUpTestWithTolerance(f, config2, tolerateUnreadyNodes, tolerateUnreadyPods)
defer testCleanup2()
glog.Infof("Scaled up twice")
})
It("should scale down empty nodes [Feature:ClusterAutoscalerScalability3]", func() {
perNodeReservation := int(float64(memCapacityMb) * 0.7)
replicas := int(math.Ceil(maxNodes * 0.7))
totalNodes := maxNodes
// resize cluster to totalNodes
newSizes := map[string]int{
anyKey(originalSizes): totalNodes,
}
setMigSizes(newSizes)
framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
// run replicas
rcConfig := reserveMemoryRCConfig(f, "some-pod", replicas, replicas*perNodeReservation, largeScaleUpTimeout)
expectedResult := createClusterPredicates(totalNodes)
config := createScaleUpTestConfig(totalNodes, totalNodes, rcConfig, expectedResult)
tolerateUnreadyNodes := totalNodes / 10
tolerateUnreadyPods := replicas / 10
testCleanup := simpleScaleUpTestWithTolerance(f, config, tolerateUnreadyNodes, tolerateUnreadyPods)
defer testCleanup()
// check if empty nodes are scaled down
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool {
return size <= replicas+3 // leaving space for non-evictable kube-system pods
}, scaleDownTimeout))
})
It("should scale down underutilized nodes [Feature:ClusterAutoscalerScalability4]", func() {
perPodReservation := int(float64(memCapacityMb) * 0.01)
// underutilizedNodes are 10% full
underutilizedPerNodeReplicas := 10
// fullNodes are 70% full
fullPerNodeReplicas := 70
totalNodes := maxNodes
underutilizedRatio := 0.3
maxDelta := 30
// resize cluster to totalNodes
newSizes := map[string]int{
anyKey(originalSizes): totalNodes,
}
setMigSizes(newSizes)
framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
// annotate all nodes with no-scale-down
ScaleDownDisabledKey := "cluster-autoscaler.kubernetes.io/scale-down-disabled"
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{
FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String(),
})
framework.ExpectNoError(err)
framework.ExpectNoError(addAnnotation(f, nodes.Items, ScaleDownDisabledKey, "true"))
// distribute pods using replication controllers taking up space that should
// be empty after pods are distributed
underutilizedNodesNum := int(float64(maxNodes) * underutilizedRatio)
fullNodesNum := totalNodes - underutilizedNodesNum
podDistribution := []podBatch{
{numNodes: fullNodesNum, podsPerNode: fullPerNodeReplicas},
{numNodes: underutilizedNodesNum, podsPerNode: underutilizedPerNodeReplicas}}
cleanup := distributeLoad(f, f.Namespace.Name, "10-70", podDistribution, perPodReservation,
int(0.95*float64(memCapacityMb)), map[string]string{}, largeScaleUpTimeout)
defer cleanup()
// enable scale down again
framework.ExpectNoError(addAnnotation(f, nodes.Items, ScaleDownDisabledKey, "false"))
// Wait for scale down to start. Node deletion takes a long time, so we just
// wait for a maximum of 30 nodes to be deleted.
nodesToScaleDownCount := int(float64(totalNodes) * 0.1)
if nodesToScaleDownCount > maxDelta {
nodesToScaleDownCount = maxDelta
}
expectedSize := totalNodes - nodesToScaleDownCount
timeout := time.Duration(nodesToScaleDownCount)*time.Minute + scaleDownTimeout
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool {
return size <= expectedSize
}, timeout))
})
It("shouldn't scale down with underutilized nodes due to host port conflicts [Feature:ClusterAutoscalerScalability5]", func() {
fullReservation := int(float64(memCapacityMb) * 0.9)
hostPortPodReservation := int(float64(memCapacityMb) * 0.3)
totalNodes := maxNodes
reservedPort := 4321
// resize cluster to totalNodes
newSizes := map[string]int{
anyKey(originalSizes): totalNodes,
}
setMigSizes(newSizes)
framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
divider := int(float64(totalNodes) * 0.7)
fullNodesCount := divider
underutilizedNodesCount := totalNodes - fullNodesCount
By("Reserving full nodes")
// run RC1 w/o host port
cleanup := ReserveMemory(f, "filling-pod", fullNodesCount, fullNodesCount*fullReservation, true, largeScaleUpTimeout*2)
defer cleanup()
By("Reserving host ports on remaining nodes")
// run RC2 w/ host port
cleanup2 := createHostPortPodsWithMemory(f, "underutilizing-host-port-pod", underutilizedNodesCount, reservedPort, underutilizedNodesCount*hostPortPodReservation, largeScaleUpTimeout)
defer cleanup2()
waitForAllCaPodsReadyInNamespace(f, c)
// wait and check scale down doesn't occur
By(fmt.Sprintf("Sleeping %v minutes...", scaleDownTimeout.Minutes()))
time.Sleep(scaleDownTimeout)
By("Checking if the number of nodes is as expected")
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
glog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes)
Expect(len(nodes.Items)).Should(Equal(totalNodes))
})
Specify("CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6]", func() {
// Start a number of pods saturating existing nodes.
perNodeReservation := int(float64(memCapacityMb) * 0.80)
replicasPerNode := 10
initialPodReplicas := nodeCount * replicasPerNode
initialPodsTotalMemory := nodeCount * perNodeReservation
reservationCleanup := ReserveMemory(f, "initial-pod", initialPodReplicas, initialPodsTotalMemory, true /* wait for pods to run */, memoryReservationTimeout)
defer reservationCleanup()
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
// Configure a number of unschedulable pods.
unschedulableMemReservation := memCapacityMb * 2
unschedulablePodReplicas := 1000
totalMemReservation := unschedulableMemReservation * unschedulablePodReplicas
timeToWait := 5 * time.Minute
podsConfig := reserveMemoryRCConfig(f, "unschedulable-pod", unschedulablePodReplicas, totalMemReservation, timeToWait)
framework.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable)
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, podsConfig.Name)
// Ensure that no new nodes have been added so far.
Expect(framework.NumberOfReadyNodes(f.ClientSet)).To(Equal(nodeCount))
// Start a number of schedulable pods to ensure CA reacts.
additionalNodes := maxNodes - nodeCount
replicas := additionalNodes * replicasPerNode
totalMemory := additionalNodes * perNodeReservation
rcConfig := reserveMemoryRCConfig(f, "extra-pod", replicas, totalMemory, largeScaleUpTimeout)
expectedResult := createClusterPredicates(nodeCount + additionalNodes)
config := createScaleUpTestConfig(nodeCount, initialPodReplicas, rcConfig, expectedResult)
// Test that scale up happens, allowing 1000 unschedulable pods not to be scheduled.
testCleanup := simpleScaleUpTestWithTolerance(f, config, 0, unschedulablePodReplicas)
defer testCleanup()
})
})
func makeUnschedulable(f *framework.Framework, nodes []v1.Node) error {
for _, node := range nodes {
err := makeNodeUnschedulable(f.ClientSet, &node)
if err != nil {
return err
}
}
return nil
}
func makeSchedulable(f *framework.Framework, nodes []v1.Node) error {
for _, node := range nodes {
err := makeNodeSchedulable(f.ClientSet, &node, false)
if err != nil {
return err
}
}
return nil
}
func anyKey(input map[string]int) string {
for k := range input {
return k
}
return ""
}
func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestConfig, tolerateMissingNodeCount int, tolerateMissingPodCount int) func() error {
// resize cluster to start size
// run rc based on config
By(fmt.Sprintf("Running RC %v from config", config.extraPods.Name))
start := time.Now()
framework.ExpectNoError(framework.RunRC(*config.extraPods))
// check results
if tolerateMissingNodeCount > 0 {
// Tolerate some number of nodes not to be created.
minExpectedNodeCount := config.expectedResult.nodes - tolerateMissingNodeCount
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= minExpectedNodeCount }, scaleUpTimeout))
} else {
framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, config.expectedResult.nodes, scaleUpTimeout))
}
glog.Infof("cluster is increased")
if tolerateMissingPodCount > 0 {
framework.ExpectNoError(waitForCaPodsReadyInNamespace(f, f.ClientSet, tolerateMissingPodCount))
} else {
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet))
}
timeTrack(start, fmt.Sprintf("Scale up to %v", config.expectedResult.nodes))
return func() error {
return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, config.extraPods.Name)
}
}
func simpleScaleUpTest(f *framework.Framework, config *scaleUpTestConfig) func() error {
return simpleScaleUpTestWithTolerance(f, config, 0, 0)
}
func reserveMemoryRCConfig(f *framework.Framework, id string, replicas, megabytes int, timeout time.Duration) *testutils.RCConfig {
return &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: timeout,
Image: framework.GetPauseImageName(f.ClientSet),
Replicas: replicas,
MemRequest: int64(1024 * 1024 * megabytes / replicas),
}
}
func createScaleUpTestConfig(nodes, pods int, extraPods *testutils.RCConfig, expectedResult *clusterPredicates) *scaleUpTestConfig {
return &scaleUpTestConfig{
initialNodes: nodes,
initialPods: pods,
extraPods: extraPods,
expectedResult: expectedResult,
}
}
func createClusterPredicates(nodes int) *clusterPredicates {
return &clusterPredicates{
nodes: nodes,
}
}
func addAnnotation(f *framework.Framework, nodes []v1.Node, key, value string) error {
for _, node := range nodes {
oldData, err := json.Marshal(node)
if err != nil {
return err
}
if node.Annotations == nil {
node.Annotations = make(map[string]string)
}
node.Annotations[key] = value
newData, err := json.Marshal(node)
if err != nil {
return err
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
if err != nil {
return err
}
_, err = f.ClientSet.CoreV1().Nodes().Patch(string(node.Name), types.StrategicMergePatchType, patchBytes)
if err != nil {
return err
}
}
return nil
}
func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, port, megabytes int, timeout time.Duration) func() error {
By(fmt.Sprintf("Running RC which reserves host port and memory"))
request := int64(1024 * 1024 * megabytes / replicas)
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: timeout,
Image: framework.GetPauseImageName(f.ClientSet),
Replicas: replicas,
HostPorts: map[string]int{"port1": port},
MemRequest: request,
}
err := framework.RunRC(*config)
framework.ExpectNoError(err)
return func() error {
return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, id)
}
}
type podBatch struct {
numNodes int
podsPerNode int
}
// distributeLoad distributes the pods in the way described by podDistribution,
// assuming all pods will have the same memory reservation and all nodes the same
// memory capacity. This allows us to generate the load on the cluster in the exact
// way that we want.
//
// To achieve this we do the following:
// 1. Create replication controllers that eat up all the space that should be
// empty after setup, making sure they end up on different nodes by specifying
// a conflicting host port
// 2. Create a target RC that will generate the load on the cluster
// 3. Remove the RCs created in 1.
func distributeLoad(f *framework.Framework, namespace string, id string, podDistribution []podBatch,
podMemRequestMegabytes int, nodeMemCapacity int, labels map[string]string, timeout time.Duration) func() error {
port := 8013
// Create load-distribution RCs with one pod per node, reserving all remaining
// memory to force the distribution of pods for the target RCs.
// The load-distribution RCs will be deleted on function return.
totalPods := 0
for i, podBatch := range podDistribution {
totalPods += podBatch.numNodes * podBatch.podsPerNode
remainingMem := nodeMemCapacity - podBatch.podsPerNode*podMemRequestMegabytes
replicas := podBatch.numNodes
cleanup := createHostPortPodsWithMemory(f, fmt.Sprintf("load-distribution%d", i), replicas, port, remainingMem*replicas, timeout)
defer cleanup()
}
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet))
// Create the target RC
rcConfig := reserveMemoryRCConfig(f, id, totalPods, totalPods*podMemRequestMegabytes, timeout)
framework.ExpectNoError(framework.RunRC(*rcConfig))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet))
return func() error {
return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, id)
}
}
func timeTrack(start time.Time, name string) {
elapsed := time.Since(start)
glog.Infof("%s took %s", name, elapsed)
}
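
The distributeLoad helper above is the most intricate part of this file, so here is a minimal sketch, using hypothetical node capacity and pod sizes, of the memory arithmetic it relies on: one host-port filler pod per node reserves everything the target pods should not use, which pins the intended number of target pods on each node.

package main

import "fmt"

// podBatchSketch mirrors the podBatch fields used by distributeLoad.
type podBatchSketch struct {
	numNodes    int
	podsPerNode int
}

func main() {
	// Hypothetical values: a 7500 MB node and a 75 MB per-pod request,
	// roughly the 1% per-pod reservation used by the scalability test.
	nodeMemCapacityMb := 7500
	podMemRequestMb := 75

	distribution := []podBatchSketch{
		{numNodes: 700, podsPerNode: 70}, // "full" nodes, about 70% utilized
		{numNodes: 300, podsPerNode: 10}, // "underutilized" nodes, about 10% utilized
	}

	totalPods := 0
	for _, batch := range distribution {
		totalPods += batch.numNodes * batch.podsPerNode
		// One filler pod per node (pinned by a conflicting host port) reserves
		// everything the target pods should not occupy on that node.
		fillerMb := nodeMemCapacityMb - batch.podsPerNode*podMemRequestMb
		fmt.Printf("%d nodes: filler %d MB, room left for %d target pods\n",
			batch.numNodes, fillerMb, batch.podsPerNode)
	}
	// The single target RC then has no choice but to spread totalPods pods as planned.
	fmt.Println("target RC replicas:", totalPods)
}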

vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_size_autoscaling.go generated vendored Normal file
File diff suppressed because it is too large

vendor/k8s.io/kubernetes/test/e2e/autoscaling/custom_metrics_autoscaling.go generated vendored Normal file

@@ -0,0 +1,210 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"context"
"time"
"golang.org/x/oauth2/google"
clientset "k8s.io/client-go/kubernetes"
. "github.com/onsi/ginkgo"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
gcm "google.golang.org/api/monitoring/v3"
as "k8s.io/api/autoscaling/v2beta1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/instrumentation/monitoring"
)
const (
stackdriverExporterDeployment = "stackdriver-exporter-deployment"
dummyDeploymentName = "dummy-deployment"
stackdriverExporterPod = "stackdriver-exporter-pod"
)
var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver)", func() {
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
})
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
var kubeClient clientset.Interface
It("should autoscale with Custom Metrics from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
kubeClient = f.ClientSet
testHPA(f, kubeClient)
})
})
func testHPA(f *framework.Framework, kubeClient clientset.Interface) {
projectId := framework.TestContext.CloudConfig.ProjectID
ctx := context.Background()
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
if err != nil {
framework.Failf("Failed to create default oauth2 client, %v", err)
}
// Hack for running tests locally, needed to authenticate in Stackdriver.
// If this is your use case, create application default credentials:
// $ gcloud auth application-default login
// and uncomment the following lines:
/*
ts, err := google.DefaultTokenSource(oauth2.NoContext)
if err != nil {
framework.Logf("Couldn't get application default credentials, %v", err)
framework.Failf("Error accessing application default credentials, %v", err)
}
client := oauth2.NewClient(oauth2.NoContext, ts)
*/
gcmService, err := gcm.New(client)
if err != nil {
framework.Failf("Failed to create gcm service, %v", err)
}
// Set up a cluster: create a custom metric and set up k8s-sd adapter
err = monitoring.CreateDescriptors(gcmService, projectId)
if err != nil {
framework.Failf("Failed to create metric descriptor: %v", err)
}
defer monitoring.CleanupDescriptors(gcmService, projectId)
err = monitoring.CreateAdapter()
if err != nil {
framework.Failf("Failed to set up: %v", err)
}
defer monitoring.CleanupAdapter()
// Run application that exports the metric
err = createDeploymentsToScale(f, kubeClient)
if err != nil {
framework.Failf("Failed to create stackdriver-exporter pod: %v", err)
}
defer cleanupDeploymentsToScale(f, kubeClient)
// Autoscale the deployments
err = createPodsHPA(f, kubeClient)
if err != nil {
framework.Failf("Failed to create 'Pods' HPA: %v", err)
}
err = createObjectHPA(f, kubeClient)
if err != nil {
framework.Failf("Failed to create 'Objects' HPA: %v", err)
}
waitForReplicas(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, 1)
waitForReplicas(dummyDeploymentName, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, 1)
}
func createDeploymentsToScale(f *framework.Framework, cs clientset.Interface) error {
_, err := cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.Name, 2, 100))
if err != nil {
return err
}
_, err = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, 100))
if err != nil {
return err
}
_, err = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.Name, 2, 100))
return err
}
func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface) {
_ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterDeployment, &metav1.DeleteOptions{})
_ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterPod, &metav1.DeleteOptions{})
_ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(dummyDeploymentName, &metav1.DeleteOptions{})
}
func createPodsHPA(f *framework.Framework, cs clientset.Interface) error {
var minReplicas int32 = 1
_, err := cs.AutoscalingV2beta1().HorizontalPodAutoscalers(f.Namespace.ObjectMeta.Name).Create(&as.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "custom-metrics-pods-hpa",
Namespace: f.Namespace.ObjectMeta.Name,
},
Spec: as.HorizontalPodAutoscalerSpec{
Metrics: []as.MetricSpec{
{
Type: as.PodsMetricSourceType,
Pods: &as.PodsMetricSource{
MetricName: monitoring.CustomMetricName,
TargetAverageValue: *resource.NewQuantity(200, resource.DecimalSI),
},
},
},
MaxReplicas: 3,
MinReplicas: &minReplicas,
ScaleTargetRef: as.CrossVersionObjectReference{
APIVersion: "extensions/v1beta1",
Kind: "Deployment",
Name: stackdriverExporterDeployment,
},
},
})
return err
}
func createObjectHPA(f *framework.Framework, cs clientset.Interface) error {
var minReplicas int32 = 1
_, err := cs.AutoscalingV2beta1().HorizontalPodAutoscalers(f.Namespace.ObjectMeta.Name).Create(&as.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "custom-metrics-objects-hpa",
Namespace: f.Namespace.ObjectMeta.Name,
},
Spec: as.HorizontalPodAutoscalerSpec{
Metrics: []as.MetricSpec{
{
Type: as.ObjectMetricSourceType,
Object: &as.ObjectMetricSource{
MetricName: monitoring.CustomMetricName,
Target: as.CrossVersionObjectReference{
Kind: "Pod",
Name: stackdriverExporterPod,
},
TargetValue: *resource.NewQuantity(200, resource.DecimalSI),
},
},
},
MaxReplicas: 3,
MinReplicas: &minReplicas,
ScaleTargetRef: as.CrossVersionObjectReference{
APIVersion: "extensions/v1beta1",
Kind: "Deployment",
Name: dummyDeploymentName,
},
},
})
return err
}
func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, timeout time.Duration, desiredReplicas int) {
interval := 20 * time.Second
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
deployment, err := cs.Extensions().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get replication controller %s: %v", deployment, err)
}
replicas := int(deployment.Status.ReadyReplicas)
framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
})
if err != nil {
framework.Failf("Timeout waiting %v for %v replicas", timeout, desiredReplicas)
}
}
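
For reference, a minimal sketch, assuming the usual HPA replica rule desired = ceil(currentReplicas * currentValue / targetValue), of why both deployments in this test are expected to settle at one replica: the exporters publish a metric value of 100 while both HPAs target 200.

package main

import (
	"fmt"
	"math"
)

// desiredReplicas applies the usual HPA rule for a custom metric:
// desired = ceil(currentReplicas * currentValue / targetValue).
func desiredReplicas(currentReplicas int, currentValue, targetValue float64) int {
	return int(math.Ceil(float64(currentReplicas) * currentValue / targetValue))
}

func main() {
	// Pods metric: each exporter pod reports 100, the target average value is 200.
	fmt.Println("pods HPA:", desiredReplicas(2, 100, 200)) // 1

	// Object metric: the single exporter pod reports 100, the target value is 200,
	// so the dummy deployment is also expected to shrink from 2 replicas to 1.
	fmt.Println("object HPA:", desiredReplicas(2, 100, 200)) // 1
}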

vendor/k8s.io/kubernetes/test/e2e/autoscaling/dns_autoscaling.go generated vendored Normal file

@@ -0,0 +1,365 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"fmt"
"math"
"strings"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
DNSdefaultTimeout = 5 * time.Minute
ClusterAddonLabelKey = "k8s-app"
DNSLabelName = "kube-dns"
DNSAutoscalerLabelName = "kube-dns-autoscaler"
)
var _ = SIGDescribe("DNS horizontal autoscaling", func() {
f := framework.NewDefaultFramework("dns-autoscaling")
var c clientset.Interface
var previousParams map[string]string
var originDNSReplicasCount int
var DNSParams_1 DNSParamsLinear
var DNSParams_2 DNSParamsLinear
var DNSParams_3 DNSParamsLinear
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
c = f.ClientSet
nodeCount := len(framework.GetReadySchedulableNodesOrDie(c).Items)
Expect(nodeCount).NotTo(BeZero())
By("Collecting original replicas count and DNS scaling params")
var err error
originDNSReplicasCount, err = getDNSReplicas(c)
Expect(err).NotTo(HaveOccurred())
pcm, err := fetchDNSScalingConfigMap(c)
Expect(err).NotTo(HaveOccurred())
previousParams = pcm.Data
if nodeCount <= 500 {
DNSParams_1 = DNSParamsLinear{
nodesPerReplica: 1,
}
DNSParams_2 = DNSParamsLinear{
nodesPerReplica: 2,
}
DNSParams_3 = DNSParamsLinear{
nodesPerReplica: 3,
coresPerReplica: 3,
}
} else {
// In large clusters, avoid creating/deleting too many DNS pods;
// this is supposed to be a correctness test, not a performance one.
// The default setup is: 256 cores/replica, 16 nodes/replica.
// With nodeCount > 500, nodes/13, nodes/14, nodes/15 and nodes/16
// are different numbers.
DNSParams_1 = DNSParamsLinear{
nodesPerReplica: 13,
}
DNSParams_2 = DNSParamsLinear{
nodesPerReplica: 14,
}
DNSParams_3 = DNSParamsLinear{
nodesPerReplica: 15,
coresPerReplica: 15,
}
}
})
// This test is separated because it is slow and needs to run serially.
// It will take around 5 minutes to run on a 4-node cluster.
It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func() {
By("Replace the dns autoscaling parameters with testing parameters")
err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
Expect(err).NotTo(HaveOccurred())
defer func() {
By("Restoring intial dns autoscaling parameters")
Expect(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams))).NotTo(HaveOccurred())
By("Wait for number of running and ready kube-dns pods recover")
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName}))
_, err := framework.WaitForPodsWithLabelRunningReady(c, metav1.NamespaceSystem, label, originDNSReplicasCount, DNSdefaultTimeout)
Expect(err).NotTo(HaveOccurred())
}()
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams_1)
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
originalSizes := make(map[string]int)
sum := 0
for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
size, err := framework.GroupSize(mig)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Initial size of %s: %d", mig, size))
originalSizes[mig] = size
sum += size
}
By("Manually increase cluster size")
increasedSize := 0
increasedSizes := make(map[string]int)
for key, val := range originalSizes {
increasedSizes[key] = val + 1
increasedSize += increasedSizes[key]
}
setMigSizes(increasedSizes)
Expect(WaitForClusterSizeFunc(c,
func(size int) bool { return size == increasedSize }, scaleUpTimeout)).NotTo(HaveOccurred())
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_1)
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_3)))
Expect(err).NotTo(HaveOccurred())
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_3)
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
By("Restoring cluster size")
setMigSizes(originalSizes)
Expect(framework.WaitForReadyNodes(c, sum, scaleDownTimeout)).NotTo(HaveOccurred())
By("Wait for kube-dns scaled to expected number")
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
})
It("kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func() {
By("Replace the dns autoscaling parameters with testing parameters")
err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
Expect(err).NotTo(HaveOccurred())
defer func() {
By("Restoring intial dns autoscaling parameters")
Expect(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams))).NotTo(HaveOccurred())
}()
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams_1)
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
By("--- Scenario: should scale kube-dns based on changed parameters ---")
By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_3)))
Expect(err).NotTo(HaveOccurred())
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_3)
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
By("--- Scenario: should re-create scaling parameters with default value when parameters got deleted ---")
By("Delete the ConfigMap for autoscaler")
err = deleteDNSScalingConfigMap(c)
Expect(err).NotTo(HaveOccurred())
By("Wait for the ConfigMap got re-created")
_, err = waitForDNSConfigMapCreated(c, DNSdefaultTimeout)
Expect(err).NotTo(HaveOccurred())
By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_2)))
Expect(err).NotTo(HaveOccurred())
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_2)
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
By("--- Scenario: should recover after autoscaler pod got deleted ---")
By("Delete the autoscaler pod for kube-dns")
Expect(deleteDNSAutoscalerPod(c)).NotTo(HaveOccurred())
By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
Expect(err).NotTo(HaveOccurred())
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_1)
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
})
})
type DNSParamsLinear struct {
nodesPerReplica float64
coresPerReplica float64
min int
max int
}
type getExpectReplicasFunc func(c clientset.Interface) int
func getExpectReplicasFuncLinear(c clientset.Interface, params *DNSParamsLinear) getExpectReplicasFunc {
return func(c clientset.Interface) int {
var replicasFromNodes float64
var replicasFromCores float64
nodes := framework.GetReadySchedulableNodesOrDie(c).Items
if params.nodesPerReplica > 0 {
replicasFromNodes = math.Ceil(float64(len(nodes)) / params.nodesPerReplica)
}
if params.coresPerReplica > 0 {
replicasFromCores = math.Ceil(float64(getScheduableCores(nodes)) / params.coresPerReplica)
}
return int(math.Max(1.0, math.Max(replicasFromNodes, replicasFromCores)))
}
}
func getScheduableCores(nodes []v1.Node) int64 {
var sc resource.Quantity
for _, node := range nodes {
if !node.Spec.Unschedulable {
sc.Add(node.Status.Capacity[v1.ResourceCPU])
}
}
scInt64, scOk := sc.AsInt64()
if !scOk {
framework.Logf("Unable to compute integer values of schedulable cores in the cluster")
return 0
}
return scInt64
}
func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(DNSAutoscalerLabelName, metav1.GetOptions{})
if err != nil {
return nil, err
}
return cm, nil
}
func deleteDNSScalingConfigMap(c clientset.Interface) error {
if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil {
return err
}
framework.Logf("DNS autoscaling ConfigMap deleted.")
return nil
}
func packLinearParams(params *DNSParamsLinear) map[string]string {
paramsMap := make(map[string]string)
paramsMap["linear"] = fmt.Sprintf("{\"nodesPerReplica\": %v,\"coresPerReplica\": %v,\"min\": %v,\"max\": %v}",
params.nodesPerReplica,
params.coresPerReplica,
params.min,
params.max)
return paramsMap
}
func packDNSScalingConfigMap(params map[string]string) *v1.ConfigMap {
configMap := v1.ConfigMap{}
configMap.ObjectMeta.Name = DNSAutoscalerLabelName
configMap.ObjectMeta.Namespace = metav1.NamespaceSystem
configMap.Data = params
return &configMap
}
func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) error {
_, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Update(configMap)
if err != nil {
return err
}
framework.Logf("DNS autoscaling ConfigMap updated.")
return nil
}
func getDNSReplicas(c clientset.Interface) (int, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
deployments, err := c.ExtensionsV1beta1().Deployments(metav1.NamespaceSystem).List(listOpts)
if err != nil {
return 0, err
}
if len(deployments.Items) != 1 {
return 0, fmt.Errorf("expected 1 DNS deployment, got %v", len(deployments.Items))
}
deployment := deployments.Items[0]
return int(*(deployment.Spec.Replicas)), nil
}
func deleteDNSAutoscalerPod(c clientset.Interface) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSAutoscalerLabelName}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts)
if err != nil {
return err
}
if len(pods.Items) != 1 {
return fmt.Errorf("expected 1 autoscaler pod, got %v", len(pods.Items))
}
podName := pods.Items[0].Name
if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(podName, nil); err != nil {
return err
}
framework.Logf("DNS autoscaling pod %v deleted.", podName)
return nil
}
func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectReplicasFunc, timeout time.Duration) (err error) {
var current int
var expected int
framework.Logf("Waiting up to %v for kube-dns to reach expected replicas", timeout)
condition := func() (bool, error) {
current, err = getDNSReplicas(c)
if err != nil {
return false, err
}
expected = getExpected(c)
if current != expected {
framework.Logf("Replicas not as expected: got %v, expected %v", current, expected)
return false, nil
}
return true, nil
}
if err = wait.Poll(2*time.Second, timeout, condition); err != nil {
return fmt.Errorf("err waiting for DNS replicas to satisfy %v, got %v: %v", expected, current, err)
}
framework.Logf("kube-dns reaches expected replicas: %v", expected)
return nil
}
func waitForDNSConfigMapCreated(c clientset.Interface, timeout time.Duration) (configMap *v1.ConfigMap, err error) {
framework.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout)
condition := func() (bool, error) {
configMap, err = fetchDNSScalingConfigMap(c)
if err != nil {
return false, nil
}
return true, nil
}
if err = wait.Poll(time.Second, timeout, condition); err != nil {
return nil, fmt.Errorf("err waiting for DNS autoscaling ConfigMap got re-created: %v", err)
}
return configMap, nil
}
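
A minimal sketch of the linear scaling rule that getExpectReplicasFuncLinear mirrors, evaluated for the three parameter sets used on small clusters; the 4-node, 8-core cluster is a hypothetical example.

package main

import (
	"fmt"
	"math"
)

// expectedDNSReplicas takes the larger of the node-based and core-based estimates
// and never drops below one replica, like getExpectReplicasFuncLinear above.
func expectedDNSReplicas(nodes, cores int, nodesPerReplica, coresPerReplica float64) int {
	var fromNodes, fromCores float64
	if nodesPerReplica > 0 {
		fromNodes = math.Ceil(float64(nodes) / nodesPerReplica)
	}
	if coresPerReplica > 0 {
		fromCores = math.Ceil(float64(cores) / coresPerReplica)
	}
	return int(math.Max(1.0, math.Max(fromNodes, fromCores)))
}

func main() {
	// Hypothetical 4-node cluster with 2 schedulable cores per node.
	fmt.Println(expectedDNSReplicas(4, 8, 1, 0)) // DNSParams_1: 1 node per replica  -> 4
	fmt.Println(expectedDNSReplicas(4, 8, 2, 0)) // DNSParams_2: 2 nodes per replica -> 2
	fmt.Println(expectedDNSReplicas(4, 8, 3, 3)) // DNSParams_3: 3 nodes or 3 cores  -> 3
}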

vendor/k8s.io/kubernetes/test/e2e/autoscaling/framework.go generated vendored Normal file

@@ -0,0 +1,23 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import "github.com/onsi/ginkgo"
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-autoscaling] "+text, body)
}

vendor/k8s.io/kubernetes/test/e2e/autoscaling/horizontal_pod_autoscaling.go generated vendored Normal file

@@ -0,0 +1,172 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"time"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
// These tests don't seem to be running properly in parallel: issue #20338.
var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", func() {
var rc *common.ResourceConsumer
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5"
titleDown := "Should scale from 5 pods to 3 pods and from 3 to 1"
SIGDescribe("[Serial] [Slow] Deployment", func() {
// CPU tests via deployments
It(titleUp, func() {
scaleUp("test-deployment", common.KindDeployment, false, rc, f)
})
It(titleDown, func() {
scaleDown("test-deployment", common.KindDeployment, false, rc, f)
})
})
SIGDescribe("[Serial] [Slow] ReplicaSet", func() {
// CPU tests via ReplicaSets
It(titleUp, func() {
scaleUp("rs", common.KindReplicaSet, false, rc, f)
})
It(titleDown, func() {
scaleDown("rs", common.KindReplicaSet, false, rc, f)
})
})
// These tests take ~20 minutes each.
SIGDescribe("[Serial] [Slow] ReplicationController", func() {
// CPU tests via replication controllers
It(titleUp+" and verify decision stability", func() {
scaleUp("rc", common.KindRC, true, rc, f)
})
It(titleDown+" and verify decision stability", func() {
scaleDown("rc", common.KindRC, true, rc, f)
})
})
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #54637 is fixed.
SIGDescribe("[DisabledForLargeClusters] ReplicationController light", func() {
It("Should scale from 1 pod to 2 pods", func() {
scaleTest := &HPAScaleTest{
initPods: 1,
totalInitialCPUUsage: 150,
perPodCPURequest: 200,
targetCPUUtilizationPercent: 50,
minPods: 1,
maxPods: 2,
firstScale: 2,
}
scaleTest.run("rc-light", common.KindRC, rc, f)
})
It("Should scale from 2 pods to 1 pod", func() {
scaleTest := &HPAScaleTest{
initPods: 2,
totalInitialCPUUsage: 50,
perPodCPURequest: 200,
targetCPUUtilizationPercent: 50,
minPods: 1,
maxPods: 2,
firstScale: 1,
}
scaleTest.run("rc-light", common.KindRC, rc, f)
})
})
})
// HPAScaleTest struct is used by the scale(...) function.
type HPAScaleTest struct {
initPods int32
totalInitialCPUUsage int32
perPodCPURequest int64
targetCPUUtilizationPercent int32
minPods int32
maxPods int32
firstScale int32
firstScaleStasis time.Duration
cpuBurst int
secondScale int32
secondScaleStasis time.Duration
}
// run is a method which runs an HPA lifecycle, from a starting state to an expected state.
// The initial state is defined by the initPods parameter.
// The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts.
// The second state change (optional) is due to the CPU burst parameter, which HPA again responds to.
// TODO: The use of 3 states is arbitrary; we could eventually make this test handle "n" states once it stabilizes.
func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *common.ResourceConsumer, f *framework.Framework) {
const timeToWait = 15 * time.Minute
rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset)
defer rc.CleanUp()
hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
rc.WaitForReplicas(int(scaleTest.firstScale), timeToWait)
if scaleTest.firstScaleStasis > 0 {
rc.EnsureDesiredReplicas(int(scaleTest.firstScale), scaleTest.firstScaleStasis)
}
if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
rc.ConsumeCPU(scaleTest.cpuBurst)
rc.WaitForReplicas(int(scaleTest.secondScale), timeToWait)
}
}
func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, rc *common.ResourceConsumer, f *framework.Framework) {
stasis := 0 * time.Minute
if checkStability {
stasis = 10 * time.Minute
}
scaleTest := &HPAScaleTest{
initPods: 1,
totalInitialCPUUsage: 250,
perPodCPURequest: 500,
targetCPUUtilizationPercent: 20,
minPods: 1,
maxPods: 5,
firstScale: 3,
firstScaleStasis: stasis,
cpuBurst: 700,
secondScale: 5,
}
scaleTest.run(name, kind, rc, f)
}
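
A minimal sketch, assuming the usual HPA CPU-utilization rule, of why the scaleUp parameters above lead to firstScale = 3 and secondScale = 5: the 700m burst would justify 7 pods, but maxPods clamps the result at 5.

package main

import (
	"fmt"
	"math"
)

// desiredPods applies the HPA CPU rule, ceil(totalUsage / (target% * perPodRequest)),
// clamped into the [minPods, maxPods] range configured on the autoscaler.
func desiredPods(totalUsageMillis, perPodRequestMillis int64, targetPercent, minPods, maxPods int32) int32 {
	desired := int32(math.Ceil(float64(totalUsageMillis) /
		(float64(targetPercent) / 100 * float64(perPodRequestMillis))))
	if desired < minPods {
		return minPods
	}
	if desired > maxPods {
		return maxPods
	}
	return desired
}

func main() {
	// scaleUp: 250m of initial load against 500m requests and a 20% target.
	fmt.Println(desiredPods(250, 500, 20, 1, 5)) // 3, the expected firstScale
	// After ConsumeCPU(700): 700m of load would justify 7 pods, clamped to maxPods.
	fmt.Println(desiredPods(700, 500, 20, 1, 5)) // 5, the expected secondScale
}
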
func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, rc *common.ResourceConsumer, f *framework.Framework) {
stasis := 0 * time.Minute
if checkStability {
stasis = 10 * time.Minute
}
scaleTest := &HPAScaleTest{
initPods: 5,
totalInitialCPUUsage: 375,
perPodCPURequest: 500,
targetCPUUtilizationPercent: 30,
minPods: 1,
maxPods: 5,
firstScale: 3,
firstScaleStasis: stasis,
cpuBurst: 10,
secondScale: 1,
}
scaleTest.run(name, kind, rc, f)
}