vendor updates

Mirror of https://github.com/ceph/ceph-csi.git. This commit updates the vendored k8s.io/kubernetes test/e2e/autoscaling packages.
vendor/k8s.io/kubernetes/test/e2e/autoscaling/BUILD (generated, vendored; 1 line changed)
@@ -31,6 +31,7 @@ go_library(
         "//vendor/google.golang.org/api/monitoring/v3:go_default_library",
         "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
         "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
vendor/k8s.io/kubernetes/test/e2e/autoscaling/OWNERS (generated, vendored; 10 lines changed)
@@ -1,14 +1,8 @@
reviewers:
- aleksandra-malinowska
- bskiba
- jszczepkowski
- MaciekPytel
- mwielgus
- sig-autoscaling-maintainers
- wasylkowski
approvers:
- aleksandra-malinowska
- bskiba
- jszczepkowski
- MaciekPytel
- mwielgus
- sig-autoscaling-maintainers
- wasylkowski
vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_size_autoscaling.go (generated, vendored; 148 lines changed)
@@ -54,6 +54,7 @@ import (
 const (
 	defaultTimeout         = 3 * time.Minute
 	resizeTimeout          = 5 * time.Minute
+	manualResizeTimeout    = 6 * time.Minute
 	scaleUpTimeout         = 5 * time.Minute
 	scaleUpTriggerTimeout  = 2 * time.Minute
 	scaleDownTimeout       = 20 * time.Minute
@@ -245,36 +246,43 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 		framework.SkipUnlessProviderIs("gke")

-		By("Creating new node-pool with one n1-standard-4 machine")
+		By("Creating new node-pool with n1-standard-4 machines")
 		const extraPoolName = "extra-pool"
 		addNodePool(extraPoolName, "n1-standard-4", 1)
 		defer deleteNodePool(extraPoolName)
-		framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+1, resizeTimeout))
+		extraNodes := getPoolInitialSize(extraPoolName)
+		framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
 		glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")

-		By("Get memory available on new node, so we can account for it when creating RC")
+		By("Getting memory available on new nodes, so we can account for it when creating RC")
 		nodes := getPoolNodes(f, extraPoolName)
-		Expect(len(nodes)).Should(Equal(1))
-		extraMem := nodes[0].Status.Capacity[v1.ResourceMemory]
-		extraMemMb := int((&extraMem).Value() / 1024 / 1024)
+		Expect(len(nodes)).Should(Equal(extraNodes))
+		extraMemMb := 0
+		for _, node := range nodes {
+			mem := node.Status.Capacity[v1.ResourceMemory]
+			extraMemMb += int((&mem).Value() / 1024 / 1024)
+		}

-		ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb+extraMemMb, false, defaultTimeout)
+		By("Reserving 0.1x more memory than the cluster holds to trigger scale up")
+		totalMemoryReservation := int(1.1 * float64(nodeCount*memAllocatableMb+extraMemMb))
+		ReserveMemory(f, "memory-reservation", 100, totalMemoryReservation, false, defaultTimeout)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")

 		// Verify, that cluster size is increased
 		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
-			func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
+			func(size int) bool { return size >= nodeCount+extraNodes+1 }, scaleUpTimeout))
 		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
 	})

 	It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 		framework.SkipUnlessProviderIs("gke")

-		By("Creating new node-pool with one n1-standard-4 machine")
+		By("Creating new node-pool with n1-standard-4 machines")
 		const extraPoolName = "extra-pool"
 		addNodePool(extraPoolName, "n1-standard-4", 1)
 		defer deleteNodePool(extraPoolName)
-		framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+1, resizeTimeout))
+		extraNodes := getPoolInitialSize(extraPoolName)
+		framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
 		framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
 		framework.ExpectNoError(disableAutoscaler(extraPoolName, 1, 2))
 	})
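The reworked reservation makes the scale-up trigger plain arithmetic: sum memory across however many nodes the new pool actually brought, then reserve 10% more than the whole cluster can allocate, so at least one of the 100 pods must stay pending and wake the autoscaler. The same computation in isolation (numbers are illustrative, not from the test):

```go
package main

import "fmt"

func main() {
	nodeCount := 3           // nodes in the default pool
	memAllocatableMb := 2800 // allocatable memory per default-pool node, in MB
	extraMemMb := 3700       // summed capacity of the new, non-autoscaled pool

	// Reserving 110% of what the cluster holds guarantees pending pods.
	totalMemoryReservation := int(1.1 * float64(nodeCount*memAllocatableMb+extraMemMb))
	perPod := totalMemoryReservation / 100 // the test spreads this over 100 pods
	fmt.Println(totalMemoryReservation, perPod)
}
```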
@@ -415,6 +423,15 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 			}
 		}

+		if minSize == 0 {
+			newSizes := make(map[string]int)
+			for mig, size := range originalSizes {
+				newSizes[mig] = size
+			}
+			newSizes[minMig] = 1
+			setMigSizes(newSizes)
+		}
+
 		removeLabels := func(nodesToClean sets.String) {
 			By("Removing labels from nodes")
 			for node := range nodesToClean {
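The new minSize == 0 block nudges the smallest MIG up to one node before the test labels and targets it, and it copies originalSizes first so the recorded baseline is never mutated and stays valid for cleanup. The copy-before-mutate idiom in isolation (hypothetical values):

```go
package main

import "fmt"

func main() {
	originalSizes := map[string]int{"mig-a": 0, "mig-b": 3}
	minMig := "mig-a"

	// Copy before mutating so the original stays usable for restore logic.
	newSizes := make(map[string]int, len(originalSizes))
	for mig, size := range originalSizes {
		newSizes[mig] = size
	}
	newSizes[minMig] = 1

	fmt.Println(originalSizes, newSizes) // map[mig-a:0 mig-b:3] map[mig-a:1 mig-b:3]
}
```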
@@ -436,7 +453,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {

 		By("Waiting for new node to appear and annotating it")
 		framework.WaitForGroupSize(minMig, int32(minSize+1))
-		// Verify, that cluster size is increased
+		// Verify that cluster size is increased
 		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 			func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))

@@ -495,23 +512,26 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 		framework.SkipUnlessProviderIs("gke")

-		By("Creating new node-pool with one n1-standard-4 machine")
+		By("Creating new node-pool with n1-standard-4 machines")
 		const extraPoolName = "extra-pool"
 		addNodePool(extraPoolName, "n1-standard-4", 1)
 		defer deleteNodePool(extraPoolName)
-		framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+1, resizeTimeout))
+		extraNodes := getPoolInitialSize(extraPoolName)
+		framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
 		framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
 		defer disableAutoscaler(extraPoolName, 1, 2)

-		By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool")
-		ReserveMemory(f, "memory-reservation", 2, int(2.5*float64(memAllocatableMb)), false, defaultTimeout)
+		extraPods := extraNodes + 1
+		totalMemoryReservation := int(float64(extraPods) * 1.5 * float64(memAllocatableMb))
+		By(fmt.Sprintf("Creating rc with %v pods too big to fit default-pool but fitting extra-pool", extraPods))
+		ReserveMemory(f, "memory-reservation", extraPods, totalMemoryReservation, false, defaultTimeout)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")

 		// Apparently GKE master is restarted couple minutes after the node pool is added
 		// reseting all the timers in scale down code. Adding 5 extra minutes to workaround
 		// this issue.
 		// TODO: Remove the extra time when GKE restart is fixed.
-		framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+2, scaleUpTimeout+5*time.Minute))
+		framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes+1, scaleUpTimeout+5*time.Minute))
 	})

 	simpleScaleDownTest := func(unready int) {
@@ -528,7 +548,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		}
 		setMigSizes(newSizes)
 		framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
-			func(size int) bool { return size >= increasedSize }, scaleUpTimeout, unready))
+			func(size int) bool { return size >= increasedSize }, manualResizeTimeout, unready))

 		By("Some node should be removed")
 		framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
@@ -551,9 +571,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		const extraPoolName = "extra-pool"
 		addNodePool(extraPoolName, "n1-standard-1", 3)
 		defer deleteNodePool(extraPoolName)
+		extraNodes := getPoolInitialSize(extraPoolName)

 		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
-			func(size int) bool { return size >= increasedSize+3 }, scaleUpTimeout))
+			func(size int) bool { return size >= increasedSize+extraNodes }, scaleUpTimeout))

 		By("Some node should be removed")
 		// Apparently GKE master is restarted couple minutes after the node pool is added
@@ -561,7 +582,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		// this issue.
 		// TODO: Remove the extra time when GKE restart is fixed.
 		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
-			func(size int) bool { return size < increasedSize+3 }, scaleDownTimeout+10*time.Minute))
+			func(size int) bool { return size < increasedSize+extraNodes }, scaleDownTimeout+10*time.Minute))
 	})

 	It("should be able to scale down when rescheduling a pod is required and pdb allows for it[Feature:ClusterSizeAutoscalingScaleDown]", func() {
@@ -662,23 +683,26 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	// verify the targeted node pool/MIG is of size 0
 	gkeScaleToZero := func() {
 		// GKE-specific setup
-		By("Add a new node pool with 1 node and min size 0")
+		By("Add a new node pool with size 1 and min size 0")
 		const extraPoolName = "extra-pool"
 		addNodePool(extraPoolName, "n1-standard-4", 1)
 		defer deleteNodePool(extraPoolName)
-		framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+1, resizeTimeout))
+		extraNodes := getPoolInitialSize(extraPoolName)
+		framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
 		framework.ExpectNoError(enableAutoscaler(extraPoolName, 0, 1))
 		defer disableAutoscaler(extraPoolName, 0, 1)

 		ngNodes := getPoolNodes(f, extraPoolName)
-		Expect(len(ngNodes) == 1).To(BeTrue())
-		node := ngNodes[0]
-		By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
+		Expect(len(ngNodes)).To(Equal(extraNodes))
+		for _, node := range ngNodes {
+			By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
+		}

-		// this part is identical
-		drainNode(f, node)
+		for _, node := range ngNodes {
+			drainNode(f, node)
+		}
 		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
-			func(size int) bool { return size < nodeCount+1 }, scaleDownTimeout))
+			func(size int) bool { return size <= nodeCount }, scaleDownTimeout))

 		// GKE-specific check
 		newSize := getPoolSize(f, extraPoolName)
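With getPoolInitialSize in play the pool may hold several nodes, so every pool node is drained rather than a single hard-coded ngNodes[0]. For reference, GKE pool membership can be read off the cloud.google.com/gke-nodepool node label; below is a sketch of such a filter, which is an assumption about how getPoolNodes works rather than a quote of it:

```go
package sketch

import v1 "k8s.io/api/core/v1"

// nodesInPool selects the nodes that belong to one GKE node pool,
// matching on the well-known node-pool label.
func nodesInPool(nodes []*v1.Node, poolName string) []*v1.Node {
	const poolLabel = "cloud.google.com/gke-nodepool"
	var matched []*v1.Node
	for _, node := range nodes {
		if node.Labels[poolLabel] == poolName {
			matched = append(matched, node)
		}
	}
	return matched
}
```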
@@ -964,10 +988,10 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str
 			MinAvailable: &minAvailable,
 		},
 	}
-	_, err = f.ClientSet.Policy().PodDisruptionBudgets(namespace).Create(pdb)
+	_, err = f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(pdb)

 	defer func() {
-		f.ClientSet.Policy().PodDisruptionBudgets(namespace).Delete(pdb.Name, &metav1.DeleteOptions{})
+		f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Delete(pdb.Name, &metav1.DeleteOptions{})
 	}()

 	framework.ExpectNoError(err)
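Several hunks in this commit are the same mechanical rename: the unversioned Policy() accessor became the versioned PolicyV1beta1() in this client-go bump, with the object types unchanged. A minimal sketch of creating a PDB through the versioned client; the name, namespace, and selector are illustrative, and the context-free Create signature matches client-go of this vintage:

```go
package sketch

import (
	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	clientset "k8s.io/client-go/kubernetes"
)

func createPDB(cs clientset.Interface, namespace string) error {
	minAvailable := intstr.FromInt(1)
	pdb := &policyv1beta1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "test-pdb", Namespace: namespace},
		Spec: policyv1beta1.PodDisruptionBudgetSpec{
			Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"app": "test"}},
			MinAvailable: &minAvailable,
		},
	}
	// Versioned accessor: Policy() became PolicyV1beta1().
	_, err := cs.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(pdb)
	return err
}
```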
@@ -1041,7 +1065,7 @@ func getClusterLocation() string {
 	}
 }

-func getGcloudCommand(commandTrack string, args []string) []string {
+func getGcloudCommandFromTrack(commandTrack string, args []string) []string {
 	command := []string{"gcloud"}
 	if commandTrack == "beta" || commandTrack == "alpha" {
 		command = append(command, commandTrack)
@@ -1052,6 +1076,14 @@ func getGcloudCommand(commandTrack string, args []string) []string {
 	return command
 }

+func getGcloudCommand(args []string) []string {
+	track := ""
+	if isRegionalCluster() {
+		track = "beta"
+	}
+	return getGcloudCommandFromTrack(track, args)
+}
+
 func isRegionalCluster() bool {
 	// TODO(bskiba): Use an appropriate indicator that the cluster is regional.
 	return framework.TestContext.CloudConfig.MultiZone
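The refactor concentrates release-track selection in one place: call sites now pass only args, and the wrapper prepends gcloud beta exactly when the cluster is regional (regional cluster operations were beta-only at the time). Standalone, the composition looks like this; the regional flag and command strings are illustrative, and the real helper derives the track from framework.TestContext:

```go
package main

import "fmt"

func getGcloudCommandFromTrack(track string, args []string) []string {
	command := []string{"gcloud"}
	if track == "beta" || track == "alpha" {
		command = append(command, track)
	}
	return append(command, args...)
}

func getGcloudCommand(regional bool, args []string) []string {
	track := ""
	if regional {
		track = "beta" // regional cluster commands lived behind gcloud beta
	}
	return getGcloudCommandFromTrack(track, args)
}

func main() {
	args := []string{"container", "clusters", "update", "my-cluster", "--node-pool=extra-pool"}
	fmt.Println(getGcloudCommand(true, args))
	// [gcloud beta container clusters update my-cluster --node-pool=extra-pool]
}
```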
@@ -1065,11 +1097,7 @@ func enableAutoscaler(nodePool string, minCount, maxCount int) error {
 		"--min-nodes=" + strconv.Itoa(minCount),
 		"--max-nodes=" + strconv.Itoa(maxCount),
 		"--node-pool=" + nodePool}
-	track := ""
-	if isRegionalCluster() {
-		track = "beta"
-	}
-	output, err := execCmd(getGcloudCommand(track, args)...).CombinedOutput()
+	output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()

 	if err != nil {
 		glog.Errorf("Failed config update result: %s", output)
@@ -1093,11 +1121,7 @@ func disableAutoscaler(nodePool string, minCount, maxCount int) error {
 	args := []string{"container", "clusters", "update", framework.TestContext.CloudConfig.Cluster,
 		"--no-enable-autoscaling",
 		"--node-pool=" + nodePool}
-	track := ""
-	if isRegionalCluster() {
-		track = "beta"
-	}
-	output, err := execCmd(getGcloudCommand(track, args)...).CombinedOutput()
+	output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()

 	if err != nil {
 		glog.Errorf("Failed config update result: %s", output)
@@ -1209,10 +1233,8 @@ func disableAutoprovisioning() error {

 func getNAPNodePools() ([]string, error) {
 	if framework.ProviderIs("gke") {
-		output, err := exec.Command("gcloud", "container", "node-pools", "list",
-			"--project="+framework.TestContext.CloudConfig.ProjectID,
-			"--zone="+framework.TestContext.CloudConfig.Zone,
-			"--cluster="+framework.TestContext.CloudConfig.Cluster).CombinedOutput()
+		args := []string{"container", "node-pools", "list", "--cluster=" + framework.TestContext.CloudConfig.Cluster}
+		output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
 		if err != nil {
 			glog.Errorf("Failed to get instance groups: %v", string(output))
 			return nil, err
@@ -1273,16 +1295,16 @@ func addNodePool(name string, machineType string, numNodes int) {
 		"--machine-type=" + machineType,
 		"--num-nodes=" + strconv.Itoa(numNodes),
 		"--cluster=" + framework.TestContext.CloudConfig.Cluster}
-	output, err := execCmd(getGcloudCommand("alpha", args)...).CombinedOutput()
+	output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
 	glog.Infof("Creating node-pool %s: %s", name, output)
-	framework.ExpectNoError(err)
+	framework.ExpectNoError(err, string(output))
 }

 func deleteNodePool(name string) {
 	glog.Infof("Deleting node pool %s", name)
 	args := []string{"container", "node-pools", "delete", name, "--quiet",
 		"--cluster=" + framework.TestContext.CloudConfig.Cluster}
-	output, err := execCmd(getGcloudCommand("alpha", args)...).CombinedOutput()
+	output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
 	if err != nil {
 		glog.Infof("Error: %v", err)
 	}
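execCmd itself is not part of this diff; judging by the call sites it presumably wraps os/exec. A plausible minimal equivalent is below; CombinedOutput interleaves stdout and stderr, which is why the new ExpectNoError(err, string(output)) assertions can surface gcloud's error text directly in the failure message:

```go
package main

import (
	"fmt"
	"os/exec"
)

// run executes a command and returns interleaved stdout+stderr,
// which is what you want to attach to a test failure message.
func run(name string, args ...string) (string, error) {
	output, err := exec.Command(name, args...).CombinedOutput()
	return string(output), err
}

func main() {
	out, err := run("gcloud", "container", "node-pools", "list", "--cluster=my-cluster")
	if err != nil {
		fmt.Printf("command failed: %v\noutput: %s\n", err, out)
		return
	}
	fmt.Println(out)
}
```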
@@ -1300,6 +1322,32 @@ func getPoolNodes(f *framework.Framework, poolName string) []*v1.Node {
 	return nodes
 }

+// getPoolInitialSize returns the initial size of the node pool taking into
+// account that it may span multiple zones. In that case, node pool consists of
+// multiple migs all containing initialNodeCount nodes.
+func getPoolInitialSize(poolName string) int {
+	// get initial node count
+	args := []string{"container", "node-pools", "describe", poolName, "--quiet",
+		"--cluster=" + framework.TestContext.CloudConfig.Cluster,
+		"--format=value(initialNodeCount)"}
+	output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
+	glog.Infof("Node-pool initial size: %s", output)
+	framework.ExpectNoError(err, string(output))
+	fields := strings.Fields(string(output))
+	Expect(len(fields)).Should(Equal(1))
+	size, err := strconv.ParseInt(fields[0], 10, 64)
+	framework.ExpectNoError(err)
+
+	// get number of node pools
+	args = []string{"container", "node-pools", "describe", poolName, "--quiet",
+		"--cluster=" + framework.TestContext.CloudConfig.Cluster,
+		"--format=value(instanceGroupUrls)"}
+	output, err = execCmd(getGcloudCommand(args)...).CombinedOutput()
+	framework.ExpectNoError(err, string(output))
+	nodeGroupCount := len(strings.Split(string(output), ";"))
+	return int(size) * nodeGroupCount
+}
+
 func getPoolSize(f *framework.Framework, poolName string) int {
 	size := 0
 	nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
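The idea behind getPoolInitialSize: on a regional cluster a GKE node pool is backed by one managed instance group per zone, each created with initialNodeCount nodes, so the pool's total size is initialNodeCount times the number of instance groups. gcloud's --format=value(instanceGroupUrls) prints the URLs joined by semicolons, hence the Split on ";". A sketch of the same parsing against canned output (URLs abbreviated, values illustrative):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// What `gcloud container node-pools describe ... --format=value(initialNodeCount)`
	// might print for a pool created with one node per zone.
	initialNodeCount := "1\n"
	// Three zones => three instance-group URLs, semicolon-separated.
	instanceGroupUrls := "https://.../zones/us-central1-a/...;https://.../zones/us-central1-b/...;https://.../zones/us-central1-c/...\n"

	fields := strings.Fields(initialNodeCount)
	size, err := strconv.ParseInt(fields[0], 10, 64)
	if err != nil {
		panic(err)
	}
	nodeGroupCount := len(strings.Split(strings.TrimSpace(instanceGroupUrls), ";"))
	fmt.Println(int(size) * nodeGroupCount) // 3
}
```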
@@ -1769,7 +1817,7 @@ func manuallyIncreaseClusterSize(f *framework.Framework, originalSizes map[strin
 		return false
 	}

-	framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, checkClusterSize, scaleUpTimeout))
+	framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, checkClusterSize, manualResizeTimeout))
 	return increasedSize
 }

@@ -1904,7 +1952,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
 	var finalErr error
 	for _, newPdbName := range newPdbs {
 		By(fmt.Sprintf("Delete PodDisruptionBudget %v", newPdbName))
-		err := f.ClientSet.Policy().PodDisruptionBudgets("kube-system").Delete(newPdbName, &metav1.DeleteOptions{})
+		err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Delete(newPdbName, &metav1.DeleteOptions{})
 		if err != nil {
 			// log error, but attempt to remove other pdbs
 			glog.Errorf("Failed to delete PodDisruptionBudget %v, err: %v", newPdbName, err)
@@ -1942,7 +1990,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
 			MinAvailable: &minAvailable,
 		},
 	}
-	_, err := f.ClientSet.Policy().PodDisruptionBudgets("kube-system").Create(pdb)
+	_, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Create(pdb)
 	newPdbs = append(newPdbs, pdbName)

 	if err != nil {
vendor/k8s.io/kubernetes/test/e2e/autoscaling/custom_metrics_autoscaling.go (generated, vendored; 177 lines changed)
@@ -20,18 +20,19 @@ import (
 	"context"
 	"time"

-	"golang.org/x/oauth2/google"
-	clientset "k8s.io/client-go/kubernetes"
-
-	. "github.com/onsi/ginkgo"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/kubernetes/test/e2e/framework"
-
 	gcm "google.golang.org/api/monitoring/v3"
 	as "k8s.io/api/autoscaling/v2beta1"
+	corev1 "k8s.io/api/core/v1"
+	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/instrumentation/monitoring"
+
+	. "github.com/onsi/ginkgo"
+	"golang.org/x/oauth2/google"
 )

 const (
@@ -46,15 +47,67 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
 	})

 	f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
-	var kubeClient clientset.Interface

-	It("should autoscale with Custom Metrics from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
-		kubeClient = f.ClientSet
-		testHPA(f, kubeClient)
+	It("should scale down with Custom Metric of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
+		initialReplicas := 2
+		scaledReplicas := 1
+		// metric should cause scale down
+		metricValue := int64(100)
+		metricTarget := 2 * metricValue
+		deployment := monitoring.SimpleStackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue)
+		customMetricTest(f, f.ClientSet, simplePodsHPA(f.Namespace.ObjectMeta.Name, metricTarget), deployment, nil, initialReplicas, scaledReplicas)
+	})
+
+	It("should scale down with Custom Metric of type Object from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
+		initialReplicas := 2
+		scaledReplicas := 1
+		// metric should cause scale down
+		metricValue := int64(100)
+		metricTarget := 2 * metricValue
+		deployment := monitoring.SimpleStackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue)
+		pod := monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, metricValue)
+		customMetricTest(f, f.ClientSet, objectHPA(f.Namespace.ObjectMeta.Name, metricTarget), deployment, pod, initialReplicas, scaledReplicas)
+	})
+
+	It("should scale down with Custom Metric of type Pod from Stackdriver with Prometheus [Feature:CustomMetricsAutoscaling]", func() {
+		initialReplicas := 2
+		scaledReplicas := 1
+		// metric should cause scale down
+		metricValue := int64(100)
+		metricTarget := 2 * metricValue
+		deployment := monitoring.PrometheusExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue)
+		customMetricTest(f, f.ClientSet, simplePodsHPA(f.Namespace.ObjectMeta.Name, metricTarget), deployment, nil, initialReplicas, scaledReplicas)
+	})
+
+	It("should scale up with two metrics of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
+		initialReplicas := 1
+		scaledReplicas := 3
+		// metric 1 would cause a scale down, if not for metric 2
+		metric1Value := int64(100)
+		metric1Target := 2 * metric1Value
+		// metric2 should cause a scale up
+		metric2Value := int64(200)
+		metric2Target := int64(0.5 * float64(metric2Value))
+		containers := []monitoring.CustomMetricContainerSpec{
+			{
+				Name:        "stackdriver-exporter-metric1",
+				MetricName:  "metric1",
+				MetricValue: metric1Value,
+			},
+			{
+				Name:        "stackdriver-exporter-metric2",
+				MetricName:  "metric2",
+				MetricValue: metric2Value,
+			},
+		}
+		metricTargets := map[string]int64{"metric1": metric1Target, "metric2": metric2Target}
+		deployment := monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers)
+		customMetricTest(f, f.ClientSet, podsHPA(f.Namespace.ObjectMeta.Name, stackdriverExporterDeployment, metricTargets), deployment, nil, initialReplicas, scaledReplicas)
 	})
 })

-func testHPA(f *framework.Framework, kubeClient clientset.Interface) {
+func customMetricTest(f *framework.Framework, kubeClient clientset.Interface, hpa *as.HorizontalPodAutoscaler,
+	deployment *extensions.Deployment, pod *corev1.Pod, initialReplicas, scaledReplicas int) {
 	projectId := framework.TestContext.CloudConfig.ProjectID

 	ctx := context.Background()
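All four refactored cases lean on the HPA scaling rule desiredReplicas = ceil(currentReplicas × currentMetricValue / targetValue): a target of twice the reported value halves the replica count, and with several metrics the controller computes a desired count per metric and takes the maximum, which is why metric2 overrides metric1's scale-down in the two-metric test. The arithmetic, spelled out as a runnable sketch:

```go
package main

import (
	"fmt"
	"math"
)

// desired applies the HPA rule for one average-value metric:
// desired = ceil(current * value / target).
func desired(current int, value, target int64) int {
	return int(math.Ceil(float64(current) * float64(value) / float64(target)))
}

// desiredForAll is the multi-metric rule: one desired count per metric,
// and the maximum wins, so a scale-up metric dominates.
func desiredForAll(current int, values, targets map[string]int64) int {
	max := 0
	for name, value := range values {
		if d := desired(current, value, targets[name]); d > max {
			max = d
		}
	}
	return max
}

func main() {
	// Scale-down cases: two pods each reporting 100 against a target of 200.
	fmt.Println(desired(2, 100, 200)) // 1
	// Two-metric case: metric1 alone would scale down, metric2 dominates.
	values := map[string]int64{"metric1": 100, "metric2": 200}
	targets := map[string]int64{"metric1": 200, "metric2": 100}
	fmt.Println(desiredForAll(1, values, targets)) // 2, repeated next cycle up to MaxReplicas
}
```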
@@ -92,80 +145,89 @@ func testHPA(f *framework.Framework, kubeClient clientset.Interface) {
 	defer monitoring.CleanupAdapter()

 	// Run application that exports the metric
-	err = createDeploymentsToScale(f, kubeClient)
+	err = createDeploymentToScale(f, kubeClient, deployment, pod)
 	if err != nil {
 		framework.Failf("Failed to create stackdriver-exporter pod: %v", err)
 	}
-	defer cleanupDeploymentsToScale(f, kubeClient)
+	defer cleanupDeploymentsToScale(f, kubeClient, deployment, pod)

-	// Autoscale the deployments
-	err = createPodsHPA(f, kubeClient)
+	// Wait for the deployment to run
+	waitForReplicas(deployment.ObjectMeta.Name, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, initialReplicas)
+
+	// Autoscale the deployment
+	_, err = kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(f.Namespace.ObjectMeta.Name).Create(hpa)
 	if err != nil {
-		framework.Failf("Failed to create 'Pods' HPA: %v", err)
-	}
-	err = createObjectHPA(f, kubeClient)
-	if err != nil {
-		framework.Failf("Failed to create 'Objects' HPA: %v", err)
+		framework.Failf("Failed to create HPA: %v", err)
 	}

-	waitForReplicas(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, 1)
-	waitForReplicas(dummyDeploymentName, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, 1)
+	waitForReplicas(deployment.ObjectMeta.Name, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, scaledReplicas)
 }

-func createDeploymentsToScale(f *framework.Framework, cs clientset.Interface) error {
-	_, err := cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.Name, 2, 100))
-	if err != nil {
-		return err
+func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, deployment *extensions.Deployment, pod *corev1.Pod) error {
+	if deployment != nil {
+		_, err := cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Create(deployment)
+		if err != nil {
+			return err
+		}
 	}
-	_, err = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, 100))
-	if err != nil {
-		return err
+	if pod != nil {
+		_, err := cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(pod)
+		if err != nil {
+			return err
+		}
 	}
-	_, err = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.Name, 2, 100))
-	return err
+	return nil
 }

-func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface) {
-	_ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterDeployment, &metav1.DeleteOptions{})
-	_ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterPod, &metav1.DeleteOptions{})
-	_ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(dummyDeploymentName, &metav1.DeleteOptions{})
+func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface, deployment *extensions.Deployment, pod *corev1.Pod) {
+	if deployment != nil {
+		_ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(deployment.ObjectMeta.Name, &metav1.DeleteOptions{})
+	}
+	if pod != nil {
+		_ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(pod.ObjectMeta.Name, &metav1.DeleteOptions{})
+	}
 }

-func createPodsHPA(f *framework.Framework, cs clientset.Interface) error {
+func simplePodsHPA(namespace string, metricTarget int64) *as.HorizontalPodAutoscaler {
+	return podsHPA(namespace, stackdriverExporterDeployment, map[string]int64{monitoring.CustomMetricName: metricTarget})
+}
+
+func podsHPA(namespace string, deploymentName string, metricTargets map[string]int64) *as.HorizontalPodAutoscaler {
 	var minReplicas int32 = 1
-	_, err := cs.AutoscalingV2beta1().HorizontalPodAutoscalers(f.Namespace.ObjectMeta.Name).Create(&as.HorizontalPodAutoscaler{
+	metrics := []as.MetricSpec{}
+	for metric, target := range metricTargets {
+		metrics = append(metrics, as.MetricSpec{
+			Type: as.PodsMetricSourceType,
+			Pods: &as.PodsMetricSource{
+				MetricName:         metric,
+				TargetAverageValue: *resource.NewQuantity(target, resource.DecimalSI),
+			},
+		})
+	}
+	return &as.HorizontalPodAutoscaler{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "custom-metrics-pods-hpa",
-			Namespace: f.Namespace.ObjectMeta.Name,
+			Namespace: namespace,
 		},
 		Spec: as.HorizontalPodAutoscalerSpec{
-			Metrics: []as.MetricSpec{
-				{
-					Type: as.PodsMetricSourceType,
-					Pods: &as.PodsMetricSource{
-						MetricName:         monitoring.CustomMetricName,
-						TargetAverageValue: *resource.NewQuantity(200, resource.DecimalSI),
-					},
-				},
-			},
+			Metrics:     metrics,
 			MaxReplicas: 3,
 			MinReplicas: &minReplicas,
 			ScaleTargetRef: as.CrossVersionObjectReference{
 				APIVersion: "extensions/v1beta1",
 				Kind:       "Deployment",
-				Name:       stackdriverExporterDeployment,
+				Name:       deploymentName,
 			},
 		},
-	})
-	return err
+	}
 }

-func createObjectHPA(f *framework.Framework, cs clientset.Interface) error {
+func objectHPA(namespace string, metricTarget int64) *as.HorizontalPodAutoscaler {
 	var minReplicas int32 = 1
-	_, err := cs.AutoscalingV2beta1().HorizontalPodAutoscalers(f.Namespace.ObjectMeta.Name).Create(&as.HorizontalPodAutoscaler{
+	return &as.HorizontalPodAutoscaler{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "custom-metrics-objects-hpa",
-			Namespace: f.Namespace.ObjectMeta.Name,
+			Namespace: namespace,
 		},
 		Spec: as.HorizontalPodAutoscalerSpec{
 			Metrics: []as.MetricSpec{
@@ -177,7 +239,7 @@ func createObjectHPA(f *framework.Framework, cs clientset.Interface) error {
 						Kind: "Pod",
 						Name: stackdriverExporterPod,
 					},
-					TargetValue: *resource.NewQuantity(200, resource.DecimalSI),
+					TargetValue: *resource.NewQuantity(metricTarget, resource.DecimalSI),
 				},
 			},
 		},
@@ -189,14 +251,13 @@ func createObjectHPA(f *framework.Framework, cs clientset.Interface) error {
 				Name: dummyDeploymentName,
 			},
 		},
-	})
-	return err
+	}
 }

 func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, timeout time.Duration, desiredReplicas int) {
 	interval := 20 * time.Second
 	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
-		deployment, err := cs.Extensions().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
+		deployment, err := cs.ExtensionsV1beta1().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
 		if err != nil {
 			framework.Failf("Failed to get replication controller %s: %v", deployment, err)
 		}
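waitForReplicas is built on wait.PollImmediate, which evaluates the condition once up front and then every interval until it returns true, an error, or the timeout lapses; returning (false, nil) means keep polling. A self-contained sketch of the pattern (the helper name and signature are illustrative):

```go
package sketch

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForCount polls getCount until it reports the desired value.
// PollImmediate runs the condition immediately, then every interval;
// (false, nil) keeps polling, while an error aborts early.
func waitForCount(getCount func() (int, error), desired int, timeout time.Duration) error {
	return wait.PollImmediate(20*time.Second, timeout, func() (bool, error) {
		n, err := getCount()
		if err != nil {
			return false, err
		}
		return n == desired, nil
	})
}
```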
vendor/k8s.io/kubernetes/test/e2e/autoscaling/dns_autoscaling.go (generated, vendored; 7 lines changed)
@@ -104,7 +104,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
 		err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
 		Expect(err).NotTo(HaveOccurred())
 		defer func() {
-			By("Restoring intial dns autoscaling parameters")
+			By("Restoring initial dns autoscaling parameters")
 			Expect(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams))).NotTo(HaveOccurred())

 			By("Wait for number of running and ready kube-dns pods recover")
@@ -157,13 +157,14 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
 		Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
 	})

-	It("kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func() {
+	// TODO: Get rid of [DisabledForLargeClusters] tag when issue #55779 is fixed.
+	It("[DisabledForLargeClusters] kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func() {

 		By("Replace the dns autoscaling parameters with testing parameters")
 		err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
 		Expect(err).NotTo(HaveOccurred())
 		defer func() {
-			By("Restoring intial dns autoscaling parameters")
+			By("Restoring initial dns autoscaling parameters")
 			Expect(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams))).NotTo(HaveOccurred())
 		}()
 		By("Wait for kube-dns scaled to expected number")
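For background on what these DNS tests assert: kube-dns-autoscaler's linear mode (from the cluster-proportional-autoscaler) sizes the Deployment by whichever of cluster cores or nodes demands more replicas. A sketch of that formula under assumed parameters (the real DNSParams values live elsewhere in this file):

```go
package main

import (
	"fmt"
	"math"
)

// Linear mode of the cluster-proportional-autoscaler, which backs
// kube-dns-autoscaler: size by whichever of cores or nodes demands more.
func expectedReplicas(cores, nodes int, coresPerReplica, nodesPerReplica float64) int {
	byCores := math.Ceil(float64(cores) / coresPerReplica)
	byNodes := math.Ceil(float64(nodes) / nodesPerReplica)
	return int(math.Max(byCores, byNodes))
}

func main() {
	// Illustrative parameters, in the spirit of the test's DNSParams values.
	fmt.Println(expectedReplicas(16, 4, 256, 16))   // 1
	fmt.Println(expectedReplicas(512, 40, 256, 16)) // 3
}
```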
vendor/k8s.io/kubernetes/test/e2e/autoscaling/horizontal_pod_autoscaling.go (generated, vendored; 3 lines changed)
@@ -66,8 +66,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu
 		})
 	})

-	// TODO: Get rid of [DisabledForLargeClusters] tag when issue #54637 is fixed.
-	SIGDescribe("[DisabledForLargeClusters] ReplicationController light", func() {
+	SIGDescribe("ReplicationController light", func() {
 		It("Should scale from 1 pod to 2 pods", func() {
 			scaleTest := &HPAScaleTest{
 				initPods: 1,