mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 18:53:35 +00:00
vendor update for CSI 0.3.0
vendor/k8s.io/kubernetes/test/e2e/autoscaling/BUILD (5 changes, generated, vendored)
@@ -11,7 +11,7 @@ go_library(
"autoscaling_timer.go",
"cluster_autoscaler_scalability.go",
"cluster_size_autoscaling.go",
"custom_metrics_autoscaling.go",
"custom_metrics_stackdriver_autoscaling.go",
"dns_autoscaling.go",
"framework.go",
"horizontal_pod_autoscaling.go",
@@ -24,6 +24,7 @@ go_library(
"//test/e2e/instrumentation/monitoring:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
@@ -33,7 +34,7 @@ go_library(
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library",
"//vendor/k8s.io/api/scheduling/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
vendor/k8s.io/kubernetes/test/e2e/autoscaling/autoscaling_timer.go (2 changes, generated, vendored)
@@ -93,7 +93,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024
memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory.
replicas := 1
resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.InternalClientset)
resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.InternalClientset, f.ScalesGetter)
defer resourceConsumer.CleanUp()
resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough.
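The one functional change in this file is the extra f.ScalesGetter argument passed to common.NewDynamicResourceConsumer: in this Kubernetes release the resource consumer resizes its target through the scale subresource instead of mutating the controller object directly. Below is a minimal sketch of what a scale-subresource update looks like with a client-go ScalesGetter; the function name, group/resource choice, and error messages are illustrative, the constructor wiring for the ScalesGetter (RESTMapper, resolver) is omitted, and the context-free Get/Update signatures match client-go of this vintage, not current releases.

package autoscalingexample

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime/schema"
    scaleclient "k8s.io/client-go/scale"
)

// resizeViaScaleSubresource bumps the replica count of a Deployment through
// the scale subresource, which is what the resource consumer does with the
// ScalesGetter it now receives from the e2e framework.
func resizeViaScaleSubresource(scales scaleclient.ScalesGetter, namespace, name string, replicas int32) error {
    gr := schema.GroupResource{Group: "apps", Resource: "deployments"}

    // Read the current scale of the target object.
    s, err := scales.Scales(namespace).Get(gr, name)
    if err != nil {
        return fmt.Errorf("reading scale of %s/%s: %v", namespace, name, err)
    }

    // Write back only the desired replica count.
    s.Spec.Replicas = replicas
    if _, err := scales.Scales(namespace).Update(gr, s); err != nil {
        return fmt.Errorf("updating scale of %s/%s: %v", namespace, name, err)
    }
    return nil
}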
vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_autoscaler_scalability.go (13 changes, generated, vendored)
@@ -31,6 +31,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/golang/glog"
. "github.com/onsi/ginkgo"
@@ -347,7 +348,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
timeToWait := 5 * time.Minute
podsConfig := reserveMemoryRCConfig(f, "unschedulable-pod", unschedulablePodReplicas, totalMemReservation, timeToWait)
framework.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable)
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, podsConfig.Name)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name)

// Ensure that no new nodes have been added so far.
Expect(framework.NumberOfReadyNodes(f.ClientSet)).To(Equal(nodeCount))
@@ -417,7 +418,7 @@ func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestC
}
timeTrack(start, fmt.Sprintf("Scale up to %v", config.expectedResult.nodes))
return func() error {
return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, config.extraPods.Name)
return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, config.extraPods.Name)
}
}

@@ -432,7 +433,7 @@ func reserveMemoryRCConfig(f *framework.Framework, id string, replicas, megabyte
Name: id,
Namespace: f.Namespace.Name,
Timeout: timeout,
Image: framework.GetPauseImageName(f.ClientSet),
Image: imageutils.GetPauseImageName(),
Replicas: replicas,
MemRequest: int64(1024 * 1024 * megabytes / replicas),
}
@@ -492,7 +493,7 @@ func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, p
Name: id,
Namespace: f.Namespace.Name,
Timeout: timeout,
Image: framework.GetPauseImageName(f.ClientSet),
Image: imageutils.GetPauseImageName(),
Replicas: replicas,
HostPorts: map[string]int{"port1": port},
MemRequest: request,
@@ -500,7 +501,7 @@ func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, p
err := framework.RunRC(*config)
framework.ExpectNoError(err)
return func() error {
return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, id)
return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
}
}

@@ -540,7 +541,7 @@ func distributeLoad(f *framework.Framework, namespace string, id string, podDist
framework.ExpectNoError(framework.RunRC(*rcConfig))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet))
return func() error {
return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, id)
return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
}
}
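Two mechanical migrations repeat across every vendored e2e file in this commit: cleanup moves from framework.DeleteRCAndPods, which needed the internal clientset, to framework.DeleteRCAndWaitForGC, which deletes the replication controller and lets the garbage collector remove its pods; and the pause image is now resolved by imageutils.GetPauseImageName() with no clientset argument. The sketch below shows a helper written in the post-migration style; the helper name, timeout, and replica handling are illustrative, while the framework and testutils calls are the ones visible in the hunks above.

package autoscalingexample

import (
    "time"

    "k8s.io/kubernetes/test/e2e/framework"
    testutils "k8s.io/kubernetes/test/utils"
    imageutils "k8s.io/kubernetes/test/utils/image"
)

// runPausePodsRC starts a replication controller of pause pods and returns a
// cleanup function in the style used after this vendor update: the RC is
// deleted and its pods are left to the garbage collector, so the internal
// clientset is no longer needed.
func runPausePodsRC(f *framework.Framework, id string, replicas int) (func() error, error) {
    config := &testutils.RCConfig{
        Client:    f.ClientSet,
        Name:      id,
        Namespace: f.Namespace.Name,
        Timeout:   10 * time.Minute,               // illustrative timeout
        Image:     imageutils.GetPauseImageName(), // no clientset argument anymore
        Replicas:  replicas,
    }
    if err := framework.RunRC(*config); err != nil {
        return nil, err
    }
    cleanup := func() error {
        // DeleteRCAndWaitForGC replaces DeleteRCAndPods throughout this diff.
        return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
    }
    return cleanup, nil
}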
vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_size_autoscaling.go (217 changes, generated, vendored)
@@ -30,7 +30,7 @@ import (

"k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
"k8s.io/api/scheduling/v1alpha1"
schedulerapi "k8s.io/api/scheduling/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@@ -45,6 +45,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/scheduling"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/golang/glog"
. "github.com/onsi/ginkgo"
@ -168,7 +169,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
By("Creating unschedulable pod")
|
||||
ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout)
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
|
||||
|
||||
By("Waiting for scale up hoping it won't happen")
|
||||
// Verify that the appropriate event was generated
|
||||
@ -195,7 +196,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
|
||||
simpleScaleUpTest := func(unready int) {
|
||||
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
|
||||
|
||||
// Verify that cluster size is increased
|
||||
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
|
||||
@ -206,6 +207,108 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]",
|
||||
func() { simpleScaleUpTest(0) })
|
||||
|
||||
supportedGpuTypes := []string{"nvidia-tesla-k80", "nvidia-tesla-v100", "nvidia-tesla-p100"}
|
||||
for _, gpuType := range supportedGpuTypes {
|
||||
gpuType := gpuType // create new variable for each iteration step
|
||||
|
||||
It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
|
||||
framework.SkipUnlessProviderIs("gke")
|
||||
|
||||
const gpuPoolName = "gpu-pool"
|
||||
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
|
||||
defer deleteNodePool(gpuPoolName)
|
||||
|
||||
installNvidiaDriversDaemonSet()
|
||||
|
||||
By("Enable autoscaler")
|
||||
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
|
||||
defer disableAutoscaler(gpuPoolName, 0, 1)
|
||||
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
|
||||
|
||||
By("Schedule a pod which requires GPU")
|
||||
framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc"))
|
||||
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
|
||||
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
|
||||
})
|
||||
|
||||
It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
|
||||
framework.SkipUnlessProviderIs("gke")
|
||||
|
||||
const gpuPoolName = "gpu-pool"
|
||||
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
|
||||
defer deleteNodePool(gpuPoolName)
|
||||
|
||||
installNvidiaDriversDaemonSet()
|
||||
|
||||
By("Schedule a single pod which requires GPU")
|
||||
framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc"))
|
||||
|
||||
By("Enable autoscaler")
|
||||
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
|
||||
defer disableAutoscaler(gpuPoolName, 0, 2)
|
||||
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
|
||||
|
||||
By("Scale GPU deployment")
|
||||
framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)
|
||||
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout))
|
||||
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(2))
|
||||
})
|
||||
|
||||
It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
|
||||
framework.SkipUnlessProviderIs("gke")
|
||||
|
||||
const gpuPoolName = "gpu-pool"
|
||||
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
|
||||
defer deleteNodePool(gpuPoolName)
|
||||
|
||||
installNvidiaDriversDaemonSet()
|
||||
|
||||
By("Enable autoscaler")
|
||||
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
|
||||
defer disableAutoscaler(gpuPoolName, 0, 1)
|
||||
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
|
||||
|
||||
By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
|
||||
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
|
||||
// Verify that cluster size is increased
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
|
||||
|
||||
// Expect gpu pool to stay intact
|
||||
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
|
||||
})
|
||||
|
||||
It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
|
||||
framework.SkipUnlessProviderIs("gke")
|
||||
|
||||
const gpuPoolName = "gpu-pool"
|
||||
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
|
||||
defer deleteNodePool(gpuPoolName)
|
||||
|
||||
installNvidiaDriversDaemonSet()
|
||||
|
||||
By("Schedule a single pod which requires GPU")
|
||||
framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc"))
|
||||
|
||||
By("Enable autoscaler")
|
||||
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
|
||||
defer disableAutoscaler(gpuPoolName, 0, 1)
|
||||
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
|
||||
|
||||
By("Remove the only POD requiring GPU")
|
||||
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
|
||||
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
|
||||
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
|
||||
})
|
||||
}
|
||||
|
||||
It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]",
|
||||
func() {
|
||||
framework.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleUpTest(1) })
|
||||
@ -222,7 +325,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
|
||||
By("Schedule more pods than can fit and wait for cluster to scale-up")
|
||||
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
|
||||
|
||||
status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
|
||||
return s.status == caOngoingScaleUpStatus
|
||||
@ -265,8 +368,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
|
||||
By("Reserving 0.1x more memory than the cluster holds to trigger scale up")
|
||||
totalMemoryReservation := int(1.1 * float64(nodeCount*memAllocatableMb+extraMemMb))
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
|
||||
ReserveMemory(f, "memory-reservation", 100, totalMemoryReservation, false, defaultTimeout)
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
|
||||
|
||||
// Verify, that cluster size is increased
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
@ -289,7 +392,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
|
||||
It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
scheduling.CreateHostPortPods(f, "host-port", nodeCount+2, false)
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "host-port")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "host-port")
|
||||
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
|
||||
@ -304,12 +407,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
}
|
||||
By("starting a pod with anti-affinity on each node")
|
||||
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "some-pod")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
|
||||
|
||||
By("scheduling extra pods with anti-affinity to existing ones")
|
||||
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels))
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "extra-pod")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
|
||||
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
|
||||
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
|
||||
@ -323,14 +426,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
"anti-affinity": "yes",
|
||||
}
|
||||
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "some-pod")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
|
||||
|
||||
By("waiting for all pods before triggering scale up")
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
|
||||
|
||||
By("creating a pod requesting EmptyDir")
|
||||
framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels, emptyDirVolumes))
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "extra-pod")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
|
||||
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
|
||||
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
|
||||
@ -359,11 +462,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
},
|
||||
Prebind: nil,
|
||||
}
|
||||
emptyStorageClass := ""
|
||||
pvcConfig := framework.PersistentVolumeClaimConfig{
|
||||
Annotations: map[string]string{
|
||||
v1.BetaStorageClassAnnotation: "",
|
||||
},
|
||||
Selector: selector,
|
||||
Selector: selector,
|
||||
StorageClassName: &emptyStorageClass,
|
||||
}
|
||||
|
||||
pv, pvc, err := framework.CreatePVPVC(c, pvConfig, pvcConfig, f.Namespace.Name, false)
|
||||
@ -388,7 +490,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
}
|
||||
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
|
||||
defer func() {
|
||||
framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "some-pod")
|
||||
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
|
||||
glog.Infof("RC and pods not using volume deleted")
|
||||
}()
|
||||
|
||||
@ -401,7 +503,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
volumes := buildVolumes(pv, pvc)
|
||||
framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, pvcPodName, labels, labels, volumes))
|
||||
defer func() {
|
||||
framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, pvcPodName)
|
||||
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, pvcPodName)
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
|
||||
}()
|
||||
|
||||
@ -506,7 +608,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
defer removeLabels(registeredNodes)
|
||||
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
|
||||
framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "node-selector"))
|
||||
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "node-selector"))
|
||||
})
|
||||
|
||||
It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
@ -524,8 +626,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
extraPods := extraNodes + 1
|
||||
totalMemoryReservation := int(float64(extraPods) * 1.5 * float64(memAllocatableMb))
|
||||
By(fmt.Sprintf("Creating rc with %v pods too big to fit default-pool but fitting extra-pool", extraPods))
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
|
||||
ReserveMemory(f, "memory-reservation", extraPods, totalMemoryReservation, false, defaultTimeout)
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
|
||||
|
||||
// Apparently GKE master is restarted couple minutes after the node pool is added
|
||||
// reseting all the timers in scale down code. Adding 5 extra minutes to workaround
|
||||
@ -663,7 +765,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
|
||||
By("Run a scale-up test")
|
||||
ReserveMemory(f, "memory-reservation", 1, 100, false, 1*time.Second)
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
|
||||
|
||||
// Verify that cluster size is increased
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
@ -776,7 +878,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
framework.TestUnderTemporaryNetworkFailure(c, "default", ntb, testFunction)
|
||||
} else {
|
||||
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout)
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
|
||||
time.Sleep(scaleUpTimeout)
|
||||
currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
framework.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
|
||||
@ -957,6 +1059,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
})
|
||||
})
|
||||
|
||||
func installNvidiaDriversDaemonSet() {
|
||||
By("Add daemonset which installs nvidia drivers")
|
||||
// the link differs from one in GKE documentation; discussed with @mindprince this one should be used
|
||||
framework.RunKubectlOrDie("apply", "-f", "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml")
|
||||
}
|
||||
|
||||
func execCmd(args ...string) *exec.Cmd {
|
||||
glog.Infof("Executing: %s", strings.Join(args, " "))
|
||||
return exec.Command(args[0], args[1:]...)
|
||||
@ -974,7 +1082,7 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str
|
||||
labelMap := map[string]string{"test_id": testID}
|
||||
framework.ExpectNoError(runReplicatedPodOnEachNode(f, nodes.Items, namespace, podsPerNode, "reschedulable-pods", labelMap, 0))
|
||||
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, namespace, "reschedulable-pods")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, "reschedulable-pods")
|
||||
|
||||
By("Create a PodDisruptionBudget")
|
||||
minAvailable := intstr.FromInt(numPods - pdbSize)
|
||||
@ -1300,20 +1408,37 @@ func addNodePool(name string, machineType string, numNodes int) {
|
||||
framework.ExpectNoError(err, string(output))
|
||||
}
|
||||
|
||||
func addGpuNodePool(name string, gpuType string, gpuCount int, numNodes int) {
|
||||
args := []string{"beta", "container", "node-pools", "create", name, "--quiet",
|
||||
"--accelerator", "type=" + gpuType + ",count=" + strconv.Itoa(gpuCount),
|
||||
"--num-nodes=" + strconv.Itoa(numNodes),
|
||||
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
|
||||
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
|
||||
glog.Infof("Creating node-pool %s: %s", name, output)
|
||||
framework.ExpectNoError(err, string(output))
|
||||
}
|
||||
|
||||
func deleteNodePool(name string) {
|
||||
glog.Infof("Deleting node pool %s", name)
|
||||
args := []string{"container", "node-pools", "delete", name, "--quiet",
|
||||
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
|
||||
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
|
||||
if err != nil {
|
||||
glog.Infof("Error: %v", err)
|
||||
}
|
||||
glog.Infof("Node-pool deletion output: %s", output)
|
||||
err := wait.ExponentialBackoff(
|
||||
wait.Backoff{Duration: 1 * time.Minute, Factor: float64(3), Steps: 3},
|
||||
func() (bool, error) {
|
||||
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
|
||||
if err != nil {
|
||||
glog.Warningf("Error deleting nodegroup - error:%v, output: %s", err, output)
|
||||
return false, nil
|
||||
}
|
||||
glog.Infof("Node-pool deletion output: %s", output)
|
||||
return true, nil
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
func getPoolNodes(f *framework.Framework, poolName string) []*v1.Node {
|
||||
nodes := make([]*v1.Node, 0, 1)
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
nodeList := framework.GetReadyNodesIncludingTaintedOrDie(f.ClientSet)
|
||||
for _, node := range nodeList.Items {
|
||||
if node.Labels[gkeNodepoolNameKey] == poolName {
|
||||
nodes = append(nodes, &node)
|
||||
@ -1388,7 +1513,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
|
||||
Name: id,
|
||||
Namespace: f.Namespace.Name,
|
||||
Timeout: timeout,
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Replicas: replicas,
|
||||
MemRequest: request,
|
||||
NodeSelector: selector,
|
||||
@ -1404,7 +1529,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
return func() error {
|
||||
return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, id)
|
||||
return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
|
||||
}
|
||||
}
|
||||
framework.Failf("Failed to reserve memory within timeout")
|
||||
@ -1617,6 +1742,26 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAdd
|
||||
return fmt.Errorf("Failed to remove taint from node in allowed number of retries")
|
||||
}
|
||||
|
||||
func scheduleGpuPod(f *framework.Framework, id string) error {
|
||||
config := &testutils.RCConfig{
|
||||
Client: f.ClientSet,
|
||||
InternalClient: f.InternalClientset,
|
||||
Name: id,
|
||||
Namespace: f.Namespace.Name,
|
||||
Timeout: 3 * scaleUpTimeout, // spinning up GPU node is slow
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Replicas: 1,
|
||||
GpuLimit: 1,
|
||||
Labels: map[string]string{"requires-gpu": "yes"},
|
||||
}
|
||||
|
||||
err := framework.RunRC(*config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create an RC running a given number of pods with anti-affinity
|
||||
func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id string, podLabels, antiAffinityLabels map[string]string) error {
|
||||
config := &testutils.RCConfig{
|
||||
@ -1626,7 +1771,7 @@ func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id
|
||||
Name: id,
|
||||
Namespace: namespace,
|
||||
Timeout: scaleUpTimeout,
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Replicas: pods,
|
||||
Labels: podLabels,
|
||||
}
|
||||
@ -1650,7 +1795,7 @@ func runVolumeAntiAffinityPods(f *framework.Framework, namespace string, pods in
|
||||
Name: id,
|
||||
Namespace: namespace,
|
||||
Timeout: scaleUpTimeout,
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Replicas: pods,
|
||||
Labels: podLabels,
|
||||
}
|
||||
@ -1731,7 +1876,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
|
||||
Name: id,
|
||||
Namespace: namespace,
|
||||
Timeout: defaultTimeout,
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Replicas: 0,
|
||||
Labels: labels,
|
||||
MemRequest: memRequest,
|
||||
@ -1790,7 +1935,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
|
||||
func runReplicatedPodOnEachNodeWithCleanup(f *framework.Framework, nodes []v1.Node, namespace string, podsPerNode int, id string, labels map[string]string, memRequest int64) (func(), error) {
|
||||
err := runReplicatedPodOnEachNode(f, nodes, namespace, podsPerNode, id, labels, memRequest)
|
||||
return func() {
|
||||
framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, namespace, id)
|
||||
framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, id)
|
||||
}, err
|
||||
}
|
||||
|
||||
@ -1853,7 +1998,7 @@ type scaleUpStatus struct {
|
||||
// Try to get timestamp from status.
|
||||
// Status configmap is not parsing-friendly, so evil regexpery follows.
|
||||
func getStatusTimestamp(status string) (time.Time, error) {
|
||||
timestampMatcher, err := regexp.Compile("Cluster-autoscaler status at \\s*([0-9\\-]+ [0-9]+:[0-9]+:[0-9]+\\.[0-9]+ \\+[0-9]+ [A-Za-z]+):")
|
||||
timestampMatcher, err := regexp.Compile("Cluster-autoscaler status at \\s*([0-9\\-]+ [0-9]+:[0-9]+:[0-9]+\\.[0-9]+ \\+[0-9]+ [A-Za-z]+)")
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
@ -2006,13 +2151,13 @@ func createPriorityClasses(f *framework.Framework) func() {
|
||||
highPriorityClassName: 1000,
|
||||
}
|
||||
for className, priority := range priorityClasses {
|
||||
_, err := f.ClientSet.SchedulingV1alpha1().PriorityClasses().Create(&v1alpha1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority})
|
||||
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority})
|
||||
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
|
||||
}
|
||||
|
||||
return func() {
|
||||
for className := range priorityClasses {
|
||||
f.ClientSet.SchedulingV1alpha1().PriorityClasses().Delete(className, nil)
|
||||
f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(className, nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
vendor/k8s.io/kubernetes/test/e2e/autoscaling/custom_metrics_autoscaling.go (271 changes, generated, vendored)
@ -1,271 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package autoscaling
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
gcm "google.golang.org/api/monitoring/v3"
|
||||
as "k8s.io/api/autoscaling/v2beta1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/instrumentation/monitoring"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
"golang.org/x/oauth2/google"
|
||||
)
|
||||
|
||||
const (
|
||||
stackdriverExporterDeployment = "stackdriver-exporter-deployment"
|
||||
dummyDeploymentName = "dummy-deployment"
|
||||
stackdriverExporterPod = "stackdriver-exporter-pod"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver)", func() {
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
})
|
||||
|
||||
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
|
||||
|
||||
It("should scale down with Custom Metric of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
scaledReplicas := 1
|
||||
// metric should cause scale down
|
||||
metricValue := int64(100)
|
||||
metricTarget := 2 * metricValue
|
||||
deployment := monitoring.SimpleStackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue)
|
||||
customMetricTest(f, f.ClientSet, simplePodsHPA(f.Namespace.ObjectMeta.Name, metricTarget), deployment, nil, initialReplicas, scaledReplicas)
|
||||
})
|
||||
|
||||
It("should scale down with Custom Metric of type Object from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
scaledReplicas := 1
|
||||
// metric should cause scale down
|
||||
metricValue := int64(100)
|
||||
metricTarget := 2 * metricValue
|
||||
deployment := monitoring.SimpleStackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue)
|
||||
pod := monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, metricValue)
|
||||
customMetricTest(f, f.ClientSet, objectHPA(f.Namespace.ObjectMeta.Name, metricTarget), deployment, pod, initialReplicas, scaledReplicas)
|
||||
})
|
||||
|
||||
It("should scale down with Custom Metric of type Pod from Stackdriver with Prometheus [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
scaledReplicas := 1
|
||||
// metric should cause scale down
|
||||
metricValue := int64(100)
|
||||
metricTarget := 2 * metricValue
|
||||
deployment := monitoring.PrometheusExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue)
|
||||
customMetricTest(f, f.ClientSet, simplePodsHPA(f.Namespace.ObjectMeta.Name, metricTarget), deployment, nil, initialReplicas, scaledReplicas)
|
||||
})
|
||||
|
||||
It("should scale up with two metrics of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 1
|
||||
scaledReplicas := 3
|
||||
// metric 1 would cause a scale down, if not for metric 2
|
||||
metric1Value := int64(100)
|
||||
metric1Target := 2 * metric1Value
|
||||
// metric2 should cause a scale up
|
||||
metric2Value := int64(200)
|
||||
metric2Target := int64(0.5 * float64(metric2Value))
|
||||
containers := []monitoring.CustomMetricContainerSpec{
|
||||
{
|
||||
Name: "stackdriver-exporter-metric1",
|
||||
MetricName: "metric1",
|
||||
MetricValue: metric1Value,
|
||||
},
|
||||
{
|
||||
Name: "stackdriver-exporter-metric2",
|
||||
MetricName: "metric2",
|
||||
MetricValue: metric2Value,
|
||||
},
|
||||
}
|
||||
metricTargets := map[string]int64{"metric1": metric1Target, "metric2": metric2Target}
|
||||
deployment := monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers)
|
||||
customMetricTest(f, f.ClientSet, podsHPA(f.Namespace.ObjectMeta.Name, stackdriverExporterDeployment, metricTargets), deployment, nil, initialReplicas, scaledReplicas)
|
||||
})
|
||||
})
|
||||
|
||||
func customMetricTest(f *framework.Framework, kubeClient clientset.Interface, hpa *as.HorizontalPodAutoscaler,
|
||||
deployment *extensions.Deployment, pod *corev1.Pod, initialReplicas, scaledReplicas int) {
|
||||
projectId := framework.TestContext.CloudConfig.ProjectID
|
||||
|
||||
ctx := context.Background()
|
||||
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
|
||||
|
||||
// Hack for running tests locally, needed to authenticate in Stackdriver
|
||||
// If this is your use case, create application default credentials:
|
||||
// $ gcloud auth application-default login
|
||||
// and uncomment following lines:
|
||||
/*
|
||||
ts, err := google.DefaultTokenSource(oauth2.NoContext)
|
||||
framework.Logf("Couldn't get application default credentials, %v", err)
|
||||
if err != nil {
|
||||
framework.Failf("Error accessing application default credentials, %v", err)
|
||||
}
|
||||
client := oauth2.NewClient(oauth2.NoContext, ts)
|
||||
*/
|
||||
|
||||
gcmService, err := gcm.New(client)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create gcm service, %v", err)
|
||||
}
|
||||
|
||||
// Set up a cluster: create a custom metric and set up k8s-sd adapter
|
||||
err = monitoring.CreateDescriptors(gcmService, projectId)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create metric descriptor: %v", err)
|
||||
}
|
||||
defer monitoring.CleanupDescriptors(gcmService, projectId)
|
||||
|
||||
err = monitoring.CreateAdapter()
|
||||
if err != nil {
|
||||
framework.Failf("Failed to set up: %v", err)
|
||||
}
|
||||
defer monitoring.CleanupAdapter()
|
||||
|
||||
// Run application that exports the metric
|
||||
err = createDeploymentToScale(f, kubeClient, deployment, pod)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create stackdriver-exporter pod: %v", err)
|
||||
}
|
||||
defer cleanupDeploymentsToScale(f, kubeClient, deployment, pod)
|
||||
|
||||
// Wait for the deployment to run
|
||||
waitForReplicas(deployment.ObjectMeta.Name, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, initialReplicas)
|
||||
|
||||
// Autoscale the deployment
|
||||
_, err = kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(f.Namespace.ObjectMeta.Name).Create(hpa)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create HPA: %v", err)
|
||||
}
|
||||
|
||||
waitForReplicas(deployment.ObjectMeta.Name, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, scaledReplicas)
|
||||
}
|
||||
|
||||
func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, deployment *extensions.Deployment, pod *corev1.Pod) error {
|
||||
if deployment != nil {
|
||||
_, err := cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Create(deployment)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if pod != nil {
|
||||
_, err := cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(pod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface, deployment *extensions.Deployment, pod *corev1.Pod) {
|
||||
if deployment != nil {
|
||||
_ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(deployment.ObjectMeta.Name, &metav1.DeleteOptions{})
|
||||
}
|
||||
if pod != nil {
|
||||
_ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(pod.ObjectMeta.Name, &metav1.DeleteOptions{})
|
||||
}
|
||||
}
|
||||
|
||||
func simplePodsHPA(namespace string, metricTarget int64) *as.HorizontalPodAutoscaler {
|
||||
return podsHPA(namespace, stackdriverExporterDeployment, map[string]int64{monitoring.CustomMetricName: metricTarget})
|
||||
}
|
||||
|
||||
func podsHPA(namespace string, deploymentName string, metricTargets map[string]int64) *as.HorizontalPodAutoscaler {
|
||||
var minReplicas int32 = 1
|
||||
metrics := []as.MetricSpec{}
|
||||
for metric, target := range metricTargets {
|
||||
metrics = append(metrics, as.MetricSpec{
|
||||
Type: as.PodsMetricSourceType,
|
||||
Pods: &as.PodsMetricSource{
|
||||
MetricName: metric,
|
||||
TargetAverageValue: *resource.NewQuantity(target, resource.DecimalSI),
|
||||
},
|
||||
})
|
||||
}
|
||||
return &as.HorizontalPodAutoscaler{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "custom-metrics-pods-hpa",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: as.HorizontalPodAutoscalerSpec{
|
||||
Metrics: metrics,
|
||||
MaxReplicas: 3,
|
||||
MinReplicas: &minReplicas,
|
||||
ScaleTargetRef: as.CrossVersionObjectReference{
|
||||
APIVersion: "extensions/v1beta1",
|
||||
Kind: "Deployment",
|
||||
Name: deploymentName,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func objectHPA(namespace string, metricTarget int64) *as.HorizontalPodAutoscaler {
|
||||
var minReplicas int32 = 1
|
||||
return &as.HorizontalPodAutoscaler{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "custom-metrics-objects-hpa",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: as.HorizontalPodAutoscalerSpec{
|
||||
Metrics: []as.MetricSpec{
|
||||
{
|
||||
Type: as.ObjectMetricSourceType,
|
||||
Object: &as.ObjectMetricSource{
|
||||
MetricName: monitoring.CustomMetricName,
|
||||
Target: as.CrossVersionObjectReference{
|
||||
Kind: "Pod",
|
||||
Name: stackdriverExporterPod,
|
||||
},
|
||||
TargetValue: *resource.NewQuantity(metricTarget, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
},
|
||||
MaxReplicas: 3,
|
||||
MinReplicas: &minReplicas,
|
||||
ScaleTargetRef: as.CrossVersionObjectReference{
|
||||
APIVersion: "extensions/v1beta1",
|
||||
Kind: "Deployment",
|
||||
Name: dummyDeploymentName,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, timeout time.Duration, desiredReplicas int) {
|
||||
interval := 20 * time.Second
|
||||
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
deployment, err := cs.ExtensionsV1beta1().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("Failed to get replication controller %s: %v", deployment, err)
|
||||
}
|
||||
replicas := int(deployment.Status.ReadyReplicas)
|
||||
framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
|
||||
return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
|
||||
})
|
||||
if err != nil {
|
||||
framework.Failf("Timeout waiting %v for %v replicas", timeout, desiredReplicas)
|
||||
}
|
||||
}
|
vendor/k8s.io/kubernetes/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go (451 changes, generated, vendored, normal file)
@ -0,0 +1,451 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package autoscaling
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
gcm "google.golang.org/api/monitoring/v3"
|
||||
as "k8s.io/api/autoscaling/v2beta1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/instrumentation/monitoring"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
"golang.org/x/oauth2/google"
|
||||
)
|
||||
|
||||
const (
|
||||
stackdriverExporterDeployment = "stackdriver-exporter-deployment"
|
||||
dummyDeploymentName = "dummy-deployment"
|
||||
stackdriverExporterPod = "stackdriver-exporter-pod"
|
||||
externalMetricValue = int64(85)
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver)", func() {
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
})
|
||||
|
||||
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
|
||||
|
||||
It("should scale down with Custom Metric of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
// metric should cause scale down
|
||||
metricValue := int64(100)
|
||||
metricTarget := 2 * metricValue
|
||||
tc := CustomMetricTestCase{
|
||||
framework: f,
|
||||
kubeClient: f.ClientSet,
|
||||
initialReplicas: initialReplicas,
|
||||
scaledReplicas: 1,
|
||||
deployment: monitoring.SimpleStackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue),
|
||||
hpa: simplePodsHPA(f.Namespace.ObjectMeta.Name, metricTarget)}
|
||||
tc.Run()
|
||||
})
|
||||
|
||||
It("should scale down with Custom Metric of type Object from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
// metric should cause scale down
|
||||
metricValue := int64(100)
|
||||
metricTarget := 2 * metricValue
|
||||
tc := CustomMetricTestCase{
|
||||
framework: f,
|
||||
kubeClient: f.ClientSet,
|
||||
initialReplicas: initialReplicas,
|
||||
scaledReplicas: 1,
|
||||
// Metric exported by deployment is ignored
|
||||
deployment: monitoring.SimpleStackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), 0 /* ignored */),
|
||||
pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, metricValue),
|
||||
hpa: objectHPA(f.Namespace.ObjectMeta.Name, metricTarget)}
|
||||
tc.Run()
|
||||
})
|
||||
|
||||
It("should scale down with External Metric with target value from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
// metric should cause scale down
|
||||
metricValue := externalMetricValue
|
||||
metricTarget := 2 * metricValue
|
||||
metricTargets := map[string]externalMetricTarget{
|
||||
"target": {
|
||||
value: metricTarget,
|
||||
isAverage: false,
|
||||
},
|
||||
}
|
||||
tc := CustomMetricTestCase{
|
||||
framework: f,
|
||||
kubeClient: f.ClientSet,
|
||||
initialReplicas: initialReplicas,
|
||||
scaledReplicas: 1,
|
||||
// Metric exported by deployment is ignored
|
||||
deployment: monitoring.SimpleStackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), 0 /* ignored */),
|
||||
pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, "target", metricValue),
|
||||
hpa: externalHPA(f.Namespace.ObjectMeta.Name, metricTargets)}
|
||||
tc.Run()
|
||||
})
|
||||
|
||||
It("should scale down with External Metric with target average value from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
// metric should cause scale down
|
||||
metricValue := externalMetricValue
|
||||
metricAverageTarget := 2 * metricValue
|
||||
metricTargets := map[string]externalMetricTarget{
|
||||
"target_average": {
|
||||
value: metricAverageTarget,
|
||||
isAverage: true,
|
||||
},
|
||||
}
|
||||
tc := CustomMetricTestCase{
|
||||
framework: f,
|
||||
kubeClient: f.ClientSet,
|
||||
initialReplicas: initialReplicas,
|
||||
scaledReplicas: 1,
|
||||
// Metric exported by deployment is ignored
|
||||
deployment: monitoring.SimpleStackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), 0 /* ignored */),
|
||||
pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, "target_average", externalMetricValue),
|
||||
hpa: externalHPA(f.Namespace.ObjectMeta.Name, metricTargets)}
|
||||
tc.Run()
|
||||
})
|
||||
|
||||
It("should scale down with Custom Metric of type Pod from Stackdriver with Prometheus [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
// metric should cause scale down
|
||||
metricValue := int64(100)
|
||||
metricTarget := 2 * metricValue
|
||||
tc := CustomMetricTestCase{
|
||||
framework: f,
|
||||
kubeClient: f.ClientSet,
|
||||
initialReplicas: initialReplicas,
|
||||
scaledReplicas: 1,
|
||||
deployment: monitoring.PrometheusExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue),
|
||||
hpa: simplePodsHPA(f.Namespace.ObjectMeta.Name, metricTarget)}
|
||||
tc.Run()
|
||||
})
|
||||
|
||||
It("should scale up with two metrics of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 1
|
||||
// metric 1 would cause a scale down, if not for metric 2
|
||||
metric1Value := int64(100)
|
||||
metric1Target := 2 * metric1Value
|
||||
// metric2 should cause a scale up
|
||||
metric2Value := int64(200)
|
||||
metric2Target := int64(0.5 * float64(metric2Value))
|
||||
containers := []monitoring.CustomMetricContainerSpec{
|
||||
{
|
||||
Name: "stackdriver-exporter-metric1",
|
||||
MetricName: "metric1",
|
||||
MetricValue: metric1Value,
|
||||
},
|
||||
{
|
||||
Name: "stackdriver-exporter-metric2",
|
||||
MetricName: "metric2",
|
||||
MetricValue: metric2Value,
|
||||
},
|
||||
}
|
||||
metricTargets := map[string]int64{"metric1": metric1Target, "metric2": metric2Target}
|
||||
tc := CustomMetricTestCase{
|
||||
framework: f,
|
||||
kubeClient: f.ClientSet,
|
||||
initialReplicas: initialReplicas,
|
||||
scaledReplicas: 3,
|
||||
deployment: monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers),
|
||||
hpa: podsHPA(f.Namespace.ObjectMeta.Name, stackdriverExporterDeployment, metricTargets)}
|
||||
tc.Run()
|
||||
})
|
||||
|
||||
It("should scale up with two External metrics from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 1
|
||||
// metric 1 would cause a scale down, if not for metric 2
|
||||
metric1Value := externalMetricValue
|
||||
metric1Target := 2 * metric1Value
|
||||
// metric2 should cause a scale up
|
||||
metric2Value := externalMetricValue
|
||||
metric2Target := int64(math.Ceil(0.5 * float64(metric2Value)))
|
||||
metricTargets := map[string]externalMetricTarget{
|
||||
"external_metric_1": {
|
||||
value: metric1Target,
|
||||
isAverage: false,
|
||||
},
|
||||
"external_metric_2": {
|
||||
value: metric2Target,
|
||||
isAverage: false,
|
||||
},
|
||||
}
|
||||
containers := []monitoring.CustomMetricContainerSpec{
|
||||
{
|
||||
Name: "stackdriver-exporter-metric1",
|
||||
MetricName: "external_metric_1",
|
||||
MetricValue: metric1Value,
|
||||
},
|
||||
{
|
||||
Name: "stackdriver-exporter-metric2",
|
||||
MetricName: "external_metric_2",
|
||||
MetricValue: metric2Value,
|
||||
},
|
||||
}
|
||||
tc := CustomMetricTestCase{
|
||||
framework: f,
|
||||
kubeClient: f.ClientSet,
|
||||
initialReplicas: initialReplicas,
|
||||
scaledReplicas: 3,
|
||||
deployment: monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers),
|
||||
hpa: externalHPA(f.Namespace.ObjectMeta.Name, metricTargets)}
|
||||
tc.Run()
|
||||
})
|
||||
})
|
||||
|
||||
type CustomMetricTestCase struct {
|
||||
framework *framework.Framework
|
||||
hpa *as.HorizontalPodAutoscaler
|
||||
kubeClient clientset.Interface
|
||||
deployment *extensions.Deployment
|
||||
pod *corev1.Pod
|
||||
initialReplicas int
|
||||
scaledReplicas int
|
||||
}
|
||||
|
||||
func (tc *CustomMetricTestCase) Run() {
|
||||
projectId := framework.TestContext.CloudConfig.ProjectID
|
||||
|
||||
ctx := context.Background()
|
||||
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
|
||||
|
||||
// Hack for running tests locally, needed to authenticate in Stackdriver
|
||||
// If this is your use case, create application default credentials:
|
||||
// $ gcloud auth application-default login
|
||||
// and uncomment following lines:
|
||||
/*
|
||||
ts, err := google.DefaultTokenSource(oauth2.NoContext)
|
||||
framework.Logf("Couldn't get application default credentials, %v", err)
|
||||
if err != nil {
|
||||
framework.Failf("Error accessing application default credentials, %v", err)
|
||||
}
|
||||
client := oauth2.NewClient(oauth2.NoContext, ts)
|
||||
*/
|
||||
|
||||
gcmService, err := gcm.New(client)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create gcm service, %v", err)
|
||||
}
|
||||
|
||||
// Set up a cluster: create a custom metric and set up k8s-sd adapter
|
||||
err = monitoring.CreateDescriptors(gcmService, projectId)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create metric descriptor: %v", err)
|
||||
}
|
||||
defer monitoring.CleanupDescriptors(gcmService, projectId)
|
||||
|
||||
err = monitoring.CreateAdapter(monitoring.AdapterDefault)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to set up: %v", err)
|
||||
}
|
||||
defer monitoring.CleanupAdapter(monitoring.AdapterDefault)
|
||||
|
||||
// Run application that exports the metric
|
||||
err = createDeploymentToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create stackdriver-exporter pod: %v", err)
|
||||
}
|
||||
defer cleanupDeploymentsToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod)
|
||||
|
||||
// Wait for the deployment to run
|
||||
waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.initialReplicas)
|
||||
|
||||
// Autoscale the deployment
|
||||
_, err = tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Create(tc.hpa)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create HPA: %v", err)
|
||||
}
|
||||
defer tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete(tc.hpa.ObjectMeta.Name, &metav1.DeleteOptions{})
|
||||
|
||||
waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.scaledReplicas)
|
||||
}
|
||||
|
||||
func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, deployment *extensions.Deployment, pod *corev1.Pod) error {
|
||||
if deployment != nil {
|
||||
_, err := cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Create(deployment)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if pod != nil {
|
||||
_, err := cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(pod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface, deployment *extensions.Deployment, pod *corev1.Pod) {
|
||||
if deployment != nil {
|
||||
_ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(deployment.ObjectMeta.Name, &metav1.DeleteOptions{})
|
||||
}
|
||||
if pod != nil {
|
||||
_ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(pod.ObjectMeta.Name, &metav1.DeleteOptions{})
|
||||
}
|
||||
}
|
||||
|
||||
func simplePodsHPA(namespace string, metricTarget int64) *as.HorizontalPodAutoscaler {
|
||||
return podsHPA(namespace, stackdriverExporterDeployment, map[string]int64{monitoring.CustomMetricName: metricTarget})
|
||||
}
|
||||
|
||||
func podsHPA(namespace string, deploymentName string, metricTargets map[string]int64) *as.HorizontalPodAutoscaler {
|
||||
var minReplicas int32 = 1
|
||||
metrics := []as.MetricSpec{}
|
||||
for metric, target := range metricTargets {
|
||||
metrics = append(metrics, as.MetricSpec{
|
||||
Type: as.PodsMetricSourceType,
|
||||
Pods: &as.PodsMetricSource{
|
||||
MetricName: metric,
|
||||
TargetAverageValue: *resource.NewQuantity(target, resource.DecimalSI),
|
||||
},
|
||||
})
|
||||
}
|
||||
return &as.HorizontalPodAutoscaler{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "custom-metrics-pods-hpa",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: as.HorizontalPodAutoscalerSpec{
|
||||
Metrics: metrics,
|
||||
MaxReplicas: 3,
|
||||
MinReplicas: &minReplicas,
|
||||
ScaleTargetRef: as.CrossVersionObjectReference{
|
||||
APIVersion: "extensions/v1beta1",
|
||||
Kind: "Deployment",
|
||||
Name: deploymentName,
|
||||
},
|
||||
},
|
||||
}
|
||||
}

func objectHPA(namespace string, metricTarget int64) *as.HorizontalPodAutoscaler {
var minReplicas int32 = 1
return &as.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "custom-metrics-objects-hpa",
Namespace: namespace,
},
Spec: as.HorizontalPodAutoscalerSpec{
Metrics: []as.MetricSpec{
{
Type: as.ObjectMetricSourceType,
Object: &as.ObjectMetricSource{
MetricName: monitoring.CustomMetricName,
Target: as.CrossVersionObjectReference{
Kind: "Pod",
Name: stackdriverExporterPod,
},
TargetValue: *resource.NewQuantity(metricTarget, resource.DecimalSI),
},
},
},
MaxReplicas: 3,
MinReplicas: &minReplicas,
ScaleTargetRef: as.CrossVersionObjectReference{
APIVersion: "extensions/v1beta1",
Kind: "Deployment",
Name: dummyDeploymentName,
},
},
}
}

type externalMetricTarget struct {
value int64
isAverage bool
}

func externalHPA(namespace string, metricTargets map[string]externalMetricTarget) *as.HorizontalPodAutoscaler {
var minReplicas int32 = 1
metricSpecs := []as.MetricSpec{}
selector := &metav1.LabelSelector{
MatchLabels: map[string]string{"resource.type": "gke_container"},
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "resource.labels.namespace_id",
Operator: metav1.LabelSelectorOpIn,
// TODO(bskiba): change default to real namespace name once it is available
// from Stackdriver.
Values: []string{"default", "dummy"},
},
{
Key: "resource.labels.pod_id",
Operator: metav1.LabelSelectorOpExists,
Values: []string{},
},
},
}
for metric, target := range metricTargets {
metricSpec := as.MetricSpec{
Type: as.ExternalMetricSourceType,
External: &as.ExternalMetricSource{
MetricName: "custom.googleapis.com|" + metric,
MetricSelector: selector,
},
}
if target.isAverage {
metricSpec.External.TargetAverageValue = resource.NewQuantity(target.value, resource.DecimalSI)
} else {
metricSpec.External.TargetValue = resource.NewQuantity(target.value, resource.DecimalSI)
}
metricSpecs = append(metricSpecs, metricSpec)
}
hpa := &as.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "custom-metrics-external-hpa",
Namespace: namespace,
},
Spec: as.HorizontalPodAutoscalerSpec{
Metrics: metricSpecs,
MaxReplicas: 3,
MinReplicas: &minReplicas,
ScaleTargetRef: as.CrossVersionObjectReference{
APIVersion: "extensions/v1beta1",
Kind: "Deployment",
Name: dummyDeploymentName,
},
},
}

return hpa
}
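
externalHPA builds one MetricSpec per entry, prefixing each metric name with "custom.googleapis.com|" and choosing TargetValue or TargetAverageValue depending on isAverage. A hedged construction sketch; the metric names and targets are assumed example inputs, not values taken from this file:

// Illustrative sketch only; "target" (raw value 100) and "target_average"
// (per-pod average 20) are assumed inputs.
hpa := externalHPA(f.Namespace.ObjectMeta.Name, map[string]externalMetricTarget{
"target": {value: 100, isAverage: false},
"target_average": {value: 20, isAverage: true},
})
if _, err := kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(f.Namespace.ObjectMeta.Name).Create(hpa); err != nil {
framework.Failf("Failed to create HPA: %v", err)
}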

func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, timeout time.Duration, desiredReplicas int) {
interval := 20 * time.Second
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
deployment, err := cs.ExtensionsV1beta1().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get deployment %s: %v", deploymentName, err)
}
replicas := int(deployment.Status.ReadyReplicas)
framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
})
if err != nil {
framework.Failf("Timeout waiting %v for %v replicas", timeout, desiredReplicas)
}
}
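
waitForReplicas polls the deployment's ReadyReplicas every 20 seconds via wait.PollImmediate and fails the test if the desired count is not reached within the timeout. A minimal usage sketch; the deployment name and expected count are assumptions, not values from this file:

// Illustrative sketch only; "dummy-deployment" and the expected count of 3
// are assumed values.
waitForReplicas("dummy-deployment", f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, 3)
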
12
vendor/k8s.io/kubernetes/test/e2e/autoscaling/dns_autoscaling.go
generated
vendored
12
vendor/k8s.io/kubernetes/test/e2e/autoscaling/dns_autoscaling.go
generated
vendored
@ -99,9 +99,11 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
// This test is separated because it is slow and needs to run serially.
// Will take around 5 minutes to run on a 4-node cluster.
It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func() {
numNodes, err := framework.NumberOfRegisteredNodes(c)
Expect(err).NotTo(HaveOccurred())

By("Replace the dns autoscaling parameters with testing parameters")
err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
Expect(err).NotTo(HaveOccurred())
defer func() {
By("Restoring initial dns autoscaling parameters")
@ -117,25 +119,21 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())

originalSizes := make(map[string]int)
sum := 0
for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
size, err := framework.GroupSize(mig)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Initial size of %s: %d", mig, size))
originalSizes[mig] = size
sum += size
}

By("Manually increase cluster size")
increasedSize := 0
increasedSizes := make(map[string]int)
for key, val := range originalSizes {
increasedSizes[key] = val + 1
increasedSize += increasedSizes[key]
}
setMigSizes(increasedSizes)
Expect(WaitForClusterSizeFunc(c,
func(size int) bool { return size == increasedSize }, scaleUpTimeout)).NotTo(HaveOccurred())
func(size int) bool { return size == numNodes+len(originalSizes) }, scaleUpTimeout)).NotTo(HaveOccurred())

By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_1)
@ -151,7 +149,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {

By("Restoring cluster size")
setMigSizes(originalSizes)
Expect(framework.WaitForReadyNodes(c, sum, scaleDownTimeout)).NotTo(HaveOccurred())
Expect(framework.WaitForReadyNodes(c, numNodes, scaleDownTimeout)).NotTo(HaveOccurred())

By("Wait for kube-dns scaled to expected number")
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
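
The expected kube-dns replica count in this test comes from the cluster-proportional-autoscaler's "linear" mode, which getExpectReplicasFuncLinear presumably mirrors. As a hedged reference, the linear rule scales by whichever of node count or core count demands more replicas; the helper below is an illustrative sketch under that assumption, not code from this repository (it would also need the standard math package):

// Sketch of the assumed "linear" sizing rule: replicas is the larger of
// ceil(cores/coresPerReplica) and ceil(nodes/nodesPerReplica), clamped to min.
func expectedLinearReplicas(nodes, cores int, nodesPerReplica, coresPerReplica float64, min int) int {
byNodes := int(math.Ceil(float64(nodes) / nodesPerReplica))
byCores := int(math.Ceil(float64(cores) / coresPerReplica))
replicas := byNodes
if byCores > replicas {
replicas = byCores
}
if replicas < min {
replicas = min
}
return replicas
}
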
2
vendor/k8s.io/kubernetes/test/e2e/autoscaling/horizontal_pod_autoscaling.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/autoscaling/horizontal_pod_autoscaling.go
generated
vendored
@ -116,7 +116,7 @@ type HPAScaleTest struct {
// TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *common.ResourceConsumer, f *framework.Framework) {
const timeToWait = 15 * time.Minute
rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset)
rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset, f.ScalesGetter)
defer rc.CleanUp()
hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)