mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)

Commit: vendor update for CSI 0.3.0
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/BUILD (generated, vendored) | 1
@@ -69,6 +69,7 @@ go_test(
         "//vendor/k8s.io/api/autoscaling/v1:go_default_library",
         "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
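Note: this BUILD change tracks the test-code switch (seen in the hunks below) from legacyscheme.Registry.RESTMapper() to the new static test mapper. A minimal sketch of how the updated tests obtain a RESTMapper, assuming the vendored k8s.io packages at this revision:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/meta/testrestmapper"
        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/kubernetes/pkg/api/legacyscheme"
    )

    func main() {
        // Builds a RESTMapper purely from the scheme's registered types,
        // with no API server round-trips -- suitable for unit tests.
        mapper := testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)

        gk := schema.GroupKind{Group: "apps", Kind: "Deployment"}
        mapping, err := mapper.RESTMapping(gk)
        if err != nil {
            fmt.Println("mapping error:", err)
            return
        }
        // With the newer apimachinery, mapping.Resource is a full
        // GroupVersionResource, so GroupResource() is read off directly.
        fmt.Println(mapping.Resource.GroupResource())
    }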
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go (generated, vendored) | 7
@@ -91,7 +91,6 @@ func NewHorizontalController(
 ) *HorizontalController {
     broadcaster := record.NewBroadcaster()
     broadcaster.StartLogging(glog.Infof)
     // TODO: remove the wrapper when every clients have moved to use the clientset.
     broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: evtNamespacer.Events("")})
     recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "horizontal-pod-autoscaler"})

@@ -223,7 +222,7 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
     switch metricSpec.Type {
     case autoscalingv2.ObjectMetricSourceType:
-        replicaCountProposal, utilizationProposal, timestampProposal, err = a.replicaCalc.GetObjectMetricReplicas(currentReplicas, metricSpec.Object.TargetValue.MilliValue(), metricSpec.Object.MetricName, hpa.Namespace, &metricSpec.Object.Target)
+        replicaCountProposal, utilizationProposal, timestampProposal, err = a.replicaCalc.GetObjectMetricReplicas(currentReplicas, metricSpec.Object.TargetValue.MilliValue(), metricSpec.Object.MetricName, hpa.Namespace, &metricSpec.Object.Target, selector)
         if err != nil {
             a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetObjectMetric", err.Error())
             setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %v", err)

@@ -317,7 +316,7 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
             },
         }
     } else if metricSpec.External.TargetValue != nil {
-        replicaCountProposal, utilizationProposal, timestampProposal, err = a.replicaCalc.GetExternalMetricReplicas(currentReplicas, metricSpec.External.TargetValue.MilliValue(), metricSpec.External.MetricName, hpa.Namespace, metricSpec.External.MetricSelector)
+        replicaCountProposal, utilizationProposal, timestampProposal, err = a.replicaCalc.GetExternalMetricReplicas(currentReplicas, metricSpec.External.TargetValue.MilliValue(), metricSpec.External.MetricName, hpa.Namespace, metricSpec.External.MetricSelector, selector)
         if err != nil {
             a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", err.Error())
             setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %v", err)

@@ -621,7 +620,7 @@ func (a *HorizontalController) shouldScale(hpa *autoscalingv2.HorizontalPodAutos
 func (a *HorizontalController) scaleForResourceMappings(namespace, name string, mappings []*apimeta.RESTMapping) (*autoscalingv1.Scale, schema.GroupResource, error) {
     var firstErr error
     for i, mapping := range mappings {
-        targetGR := mapping.GroupVersionKind.GroupVersion().WithResource(mapping.Resource).GroupResource()
+        targetGR := mapping.Resource.GroupResource()
         scale, err := a.scaleNamespacer.Scales(namespace).Get(targetGR, name)
         if err == nil {
             return scale, targetGR, nil
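The scaleForResourceMappings change in the last hunk above tracks an apimachinery type change: RESTMapping.Resource used to be a bare resource string, and in this vendored revision it is a full schema.GroupVersionResource, so the GroupResource can be read off directly. A minimal sketch of the equivalence, with hypothetical values:

    // Old shape: compose the GroupResource from the mapping's GVK plus a string:
    //   mapping.GroupVersionKind.GroupVersion().WithResource("deployments").GroupResource()
    // New shape: the mapping already carries a schema.GroupVersionResource:
    gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
    targetGR := gvr.GroupResource() // schema.GroupResource{Group: "apps", Resource: "deployments"}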
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go (generated, vendored) | 106
@@ -27,6 +27,7 @@ import (
     autoscalingv1 "k8s.io/api/autoscaling/v1"
     autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
     "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/meta/testrestmapper"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"

@@ -103,6 +104,7 @@ type testCase struct {
     reportedLevels       []uint64
     reportedCPURequests  []resource.Quantity
     reportedPodReadiness []v1.ConditionStatus
+    reportedPodPhase     []v1.PodPhase
     scaleUpdated         bool
     statusUpdated        bool
     eventCreated         bool

@@ -123,6 +125,7 @@ type testCase struct {
     testClient        *fake.Clientset
     testMetricsClient *metricsfake.Clientset
     testCMClient      *cmfake.FakeCustomMetricsClient
+    testEMClient      *emfake.FakeExternalMetricsClient
     testScaleClient   *scalefake.FakeScaleClient
 }

@@ -245,15 +248,35 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
     defer tc.Unlock()

     obj := &v1.PodList{}
-    for i := 0; i < len(tc.reportedCPURequests); i++ {
+    specifiedCPURequests := tc.reportedCPURequests != nil
+
+    numPodsToCreate := int(tc.initialReplicas)
+    if specifiedCPURequests {
+        numPodsToCreate = len(tc.reportedCPURequests)
+    }
+
+    for i := 0; i < numPodsToCreate; i++ {
         podReadiness := v1.ConditionTrue
         if tc.reportedPodReadiness != nil {
             podReadiness = tc.reportedPodReadiness[i]
         }

+        podPhase := v1.PodRunning
+        if tc.reportedPodPhase != nil {
+            podPhase = tc.reportedPodPhase[i]
+        }
+
         podName := fmt.Sprintf("%s-%d", podNamePrefix, i)

+        reportedCPURequest := resource.MustParse("1.0")
+        if specifiedCPURequests {
+            reportedCPURequest = tc.reportedCPURequests[i]
+        }
+
         pod := v1.Pod{
             Status: v1.PodStatus{
-                Phase: v1.PodRunning,
+                Phase: podPhase,
                 Conditions: []v1.PodCondition{
                     {
                         Type: v1.PodReady,

@@ -268,12 +291,13 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
                     "name": podNamePrefix,
                 },
             },
+
             Spec: v1.PodSpec{
                 Containers: []v1.Container{
                     {
                         Resources: v1.ResourceRequirements{
                             Requests: v1.ResourceList{
-                                v1.ResourceCPU: tc.reportedCPURequests[i],
+                                v1.ResourceCPU: reportedCPURequest,
                             },
                         },
                     },

@@ -488,7 +512,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
         }

         name := getForAction.GetName()
-        mapper := legacyscheme.Registry.RESTMapper()
+        mapper := testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)
         metrics := &cmapi.MetricValueList{}
         var matchedTarget *autoscalingv2.MetricSpec
         for i, target := range tc.metricsTarget {

@@ -499,7 +523,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
             t.Logf("unable to get mapping for %s: %v", gk.String(), err)
             continue
         }
-        groupResource := schema.GroupResource{Group: mapping.GroupVersionKind.Group, Resource: mapping.Resource}
+        groupResource := mapping.Resource.GroupResource()

         if getForAction.GetResource().Resource == groupResource.String() {
             matchedTarget = &tc.metricsTarget[i]

@@ -577,6 +601,9 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
     if tc.testCMClient != nil {
         testCMClient = tc.testCMClient
     }
+    if tc.testEMClient != nil {
+        testEMClient = tc.testEMClient
+    }
     if tc.testScaleClient != nil {
         testScaleClient = tc.testScaleClient
     }

@@ -623,7 +650,7 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
         eventClient.Core(),
         testScaleClient,
         testClient.Autoscaling(),
-        legacyscheme.Registry.RESTMapper(),
+        testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme),
         replicaCalc,
         informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
         controller.NoResyncPeriodFunc(),
@@ -713,6 +740,24 @@ func TestScaleUpUnreadyNoScale(t *testing.T) {
     tc.runTest(t)
 }

+func TestScaleUpIgnoresFailedPods(t *testing.T) {
+    tc := testCase{
+        minReplicas:          2,
+        maxReplicas:          6,
+        initialReplicas:      2,
+        desiredReplicas:      4,
+        CPUTarget:            30,
+        CPUCurrent:           60,
+        verifyCPUCurrent:     true,
+        reportedLevels:       []uint64{500, 700},
+        reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+        reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
+        reportedPodPhase:     []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
+        useMetricsAPI:        true,
+    }
+    tc.runTest(t)
+}
+
 func TestScaleUpDeployment(t *testing.T) {
     tc := testCase{
         minReplicas: 2,
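Worked numbers for TestScaleUpIgnoresFailedPods above (an illustrative sketch, not vendored code): the two PodFailed pods are dropped entirely, so only the two running, ready pods enter the calculation.

    // utilization = (500m + 700m) / (2 pods * 1000m request) = 60%  (CPUCurrent)
    // usageRatio  = 60 / 30 (CPUTarget)                       = 2.0
    // desired     = ceil(2.0 * 2 ready pods)                  = 4   (desiredReplicas)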
@@ -1017,6 +1062,24 @@ func TestScaleDownIgnoresUnreadyPods(t *testing.T) {
     tc.runTest(t)
 }

+func TestScaleDownIgnoresFailedPods(t *testing.T) {
+    tc := testCase{
+        minReplicas:          2,
+        maxReplicas:          6,
+        initialReplicas:      5,
+        desiredReplicas:      3,
+        CPUTarget:            50,
+        CPUCurrent:           28,
+        verifyCPUCurrent:     true,
+        reportedLevels:       []uint64{100, 300, 500, 250, 250},
+        reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+        useMetricsAPI:        true,
+        reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
+        reportedPodPhase:     []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
+    }
+    tc.runTest(t)
+}
+
 func TestTolerance(t *testing.T) {
     tc := testCase{
         minReplicas: 1,
@@ -1307,13 +1370,14 @@ func TestEmptyMetrics(t *testing.T) {

 func TestEmptyCPURequest(t *testing.T) {
     tc := testCase{
-        minReplicas:     1,
-        maxReplicas:     5,
-        initialReplicas: 1,
-        desiredReplicas: 1,
-        CPUTarget:       100,
-        reportedLevels:  []uint64{200},
-        useMetricsAPI:   true,
+        minReplicas:         1,
+        maxReplicas:         5,
+        initialReplicas:     1,
+        desiredReplicas:     1,
+        CPUTarget:           100,
+        reportedLevels:      []uint64{200},
+        reportedCPURequests: []resource.Quantity{},
+        useMetricsAPI:       true,
         expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
             {Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
             {Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "FailedGetResourceMetric"},

@@ -1527,6 +1591,16 @@ func TestConditionFailedGetMetrics(t *testing.T) {
             },
         },
     },
+    "FailedGetExternalMetric": {
+        {
+            Type: autoscalingv2.ExternalMetricSourceType,
+            External: &autoscalingv2.ExternalMetricSource{
+                MetricSelector: &metav1.LabelSelector{},
+                MetricName:     "qps",
+                TargetValue:    resource.NewMilliQuantity(300, resource.DecimalSI),
+            },
+        },
+    },
 }

 for reason, specs := range metricsTargets {

@@ -1540,9 +1614,10 @@ func TestConditionFailedGetMetrics(t *testing.T) {
         reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
         useMetricsAPI:       true,
     }
-    _, testMetricsClient, testCMClient, _, _ := tc.prepareTestClient(t)
+    _, testMetricsClient, testCMClient, testEMClient, _ := tc.prepareTestClient(t)
     tc.testMetricsClient = testMetricsClient
     tc.testCMClient = testCMClient
+    tc.testEMClient = testEMClient

     testMetricsClient.PrependReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
         return true, &metricsapi.PodMetricsList{}, fmt.Errorf("something went wrong")

@@ -1550,6 +1625,9 @@ func TestConditionFailedGetMetrics(t *testing.T) {
     testCMClient.PrependReactor("get", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
         return true, &cmapi.MetricValueList{}, fmt.Errorf("something went wrong")
     })
+    testEMClient.PrependReactor("list", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        return true, &emapi.ExternalMetricValueList{}, fmt.Errorf("something went wrong")
+    })

     tc.expectedConditions = []autoscalingv1.HorizontalPodAutoscalerCondition{
         {Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/legacy_horizontal_test.go (generated, vendored) | 31
@@ -30,6 +30,7 @@ import (
     autoscalingv1 "k8s.io/api/autoscaling/v1"
     autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
     "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/meta/testrestmapper"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"

@@ -497,7 +498,7 @@ func (tc *legacyTestCase) runTest(t *testing.T) {
         eventClient.Core(),
         testScaleClient,
         testClient.Autoscaling(),
-        legacyscheme.Registry.RESTMapper(),
+        testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme),
         replicaCalc,
         informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
         controller.NoResyncPeriodFunc(),

@@ -524,7 +525,7 @@ func (tc *legacyTestCase) runTest(t *testing.T) {
     tc.verifyResults(t)
 }

-func LegacyTestScaleUp(t *testing.T) {
+func TestLegacyScaleUp(t *testing.T) {
     tc := legacyTestCase{
         minReplicas: 2,
         maxReplicas: 6,

@@ -539,7 +540,7 @@ func LegacyTestScaleUp(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestScaleUpUnreadyLessScale(t *testing.T) {
+func TestLegacyScaleUpUnreadyLessScale(t *testing.T) {
     tc := legacyTestCase{
         minReplicas: 2,
         maxReplicas: 6,

@@ -556,7 +557,7 @@ func LegacyTestScaleUpUnreadyLessScale(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestScaleUpUnreadyNoScale(t *testing.T) {
+func TestLegacyScaleUpUnreadyNoScale(t *testing.T) {
     tc := legacyTestCase{
         minReplicas: 2,
         maxReplicas: 6,

@@ -573,7 +574,7 @@ func LegacyTestScaleUpUnreadyNoScale(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestScaleUpDeployment(t *testing.T) {
+func TestLegacyScaleUpDeployment(t *testing.T) {
     tc := legacyTestCase{
         minReplicas: 2,
         maxReplicas: 6,

@@ -593,7 +594,7 @@ func LegacyTestScaleUpDeployment(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestScaleUpReplicaSet(t *testing.T) {
+func TestLegacyScaleUpReplicaSet(t *testing.T) {
     tc := legacyTestCase{
         minReplicas: 2,
         maxReplicas: 6,

@@ -613,7 +614,7 @@ func LegacyTestScaleUpReplicaSet(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestScaleUpCM(t *testing.T) {
+func TestLegacyScaleUpCM(t *testing.T) {
     tc := legacyTestCase{
         minReplicas: 2,
         maxReplicas: 6,

@@ -635,7 +636,7 @@ func LegacyTestScaleUpCM(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestScaleUpCMUnreadyLessScale(t *testing.T) {
+func TestLegacyScaleUpCMUnreadyLessScale(t *testing.T) {
     tc := legacyTestCase{
         minReplicas: 2,
         maxReplicas: 6,

@@ -658,7 +659,7 @@ func LegacyTestScaleUpCMUnreadyLessScale(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
+func TestLegacyScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
     tc := legacyTestCase{
         minReplicas: 2,
         maxReplicas: 6,

@@ -681,7 +682,7 @@ func LegacyTestScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestScaleDown(t *testing.T) {
+func TestLegacyScaleDown(t *testing.T) {
     tc := legacyTestCase{
         minReplicas: 2,
         maxReplicas: 6,

@@ -696,7 +697,7 @@ func LegacyTestScaleDown(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestScaleDownCM(t *testing.T) {
+func TestLegacyScaleDownCM(t *testing.T) {
     tc := legacyTestCase{
         minReplicas: 2,
         maxReplicas: 6,

@@ -718,7 +719,7 @@ func LegacyTestScaleDownCM(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestScaleDownIgnoresUnreadyPods(t *testing.T) {
+func TestLegacyScaleDownIgnoresUnreadyPods(t *testing.T) {
     tc := legacyTestCase{
         minReplicas: 2,
         maxReplicas: 6,

@@ -840,7 +841,7 @@ func LegacyTestMaxReplicas(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestSuperfluousMetrics(t *testing.T) {
+func TestLegacySuperfluousMetrics(t *testing.T) {
     tc := legacyTestCase{
         minReplicas: 2,
         maxReplicas: 6,

@@ -1022,7 +1023,7 @@ func LegacyTestComputedToleranceAlgImplementation(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestScaleUpRCImmediately(t *testing.T) {
+func TestLegacyScaleUpRCImmediately(t *testing.T) {
     time := metav1.Time{Time: time.Now()}
     tc := legacyTestCase{
         minReplicas: 2,

@@ -1038,7 +1039,7 @@ func LegacyTestScaleUpRCImmediately(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestScaleDownRCImmediately(t *testing.T) {
+func TestLegacyScaleDownRCImmediately(t *testing.T) {
     time := metav1.Time{Time: time.Now()}
     tc := legacyTestCase{
         minReplicas: 2,
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/legacy_replica_calculator_test.go (generated, vendored) | 46
@@ -227,7 +227,7 @@ func (tc *legacyReplicaCalcTestCase) runTest(t *testing.T) {
     }
 }

-func LegacyTestReplicaCalcDisjointResourcesMetrics(t *testing.T) {
+func TestLegacyReplicaCalcDisjointResourcesMetrics(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas: 1,
         expectedError:   fmt.Errorf("no metrics returned matched known pods"),

@@ -243,7 +243,7 @@ func LegacyTestReplicaCalcDisjointResourcesMetrics(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcScaleUp(t *testing.T) {
+func TestLegacyReplicaCalcScaleUp(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  3,
         expectedReplicas: 5,

@@ -260,7 +260,7 @@ func LegacyTestReplicaCalcScaleUp(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcScaleUpUnreadyLessScale(t *testing.T) {
+func TestLegacyReplicaCalcScaleUpUnreadyLessScale(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  3,
         expectedReplicas: 4,

@@ -278,7 +278,7 @@ func LegacyTestReplicaCalcScaleUpUnreadyLessScale(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
+func TestLegacyReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  3,
         expectedReplicas: 3,

@@ -296,7 +296,7 @@ func LegacyTestReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcScaleUpCM(t *testing.T) {
+func TestLegacyReplicaCalcScaleUpCM(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  3,
         expectedReplicas: 4,

@@ -310,7 +310,7 @@ func LegacyTestReplicaCalcScaleUpCM(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcScaleUpCMUnreadyLessScale(t *testing.T) {
+func TestLegacyReplicaCalcScaleUpCMUnreadyLessScale(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  3,
         expectedReplicas: 4,

@@ -325,7 +325,7 @@ func LegacyTestReplicaCalcScaleUpCMUnreadyLessScale(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
+func TestLegacyReplicaCalcScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  3,
         expectedReplicas: 3,

@@ -340,7 +340,7 @@ func LegacyTestReplicaCalcScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcScaleDown(t *testing.T) {
+func TestLegacyReplicaCalcScaleDown(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  5,
         expectedReplicas: 3,

@@ -357,7 +357,7 @@ func LegacyTestReplicaCalcScaleDown(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcScaleDownCM(t *testing.T) {
+func TestLegacyReplicaCalcScaleDownCM(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  5,
         expectedReplicas: 3,

@@ -371,7 +371,7 @@ func LegacyTestReplicaCalcScaleDownCM(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) {
+func TestLegacyReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  5,
         expectedReplicas: 2,

@@ -389,7 +389,7 @@ func LegacyTestReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcTolerance(t *testing.T) {
+func TestLegacyReplicaCalcTolerance(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  3,
         expectedReplicas: 3,

@@ -406,7 +406,7 @@ func LegacyTestReplicaCalcTolerance(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcToleranceCM(t *testing.T) {
+func TestLegacyReplicaCalcToleranceCM(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  3,
         expectedReplicas: 3,

@@ -420,7 +420,7 @@ func LegacyTestReplicaCalcToleranceCM(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcSuperfluousMetrics(t *testing.T) {
+func TestLegacyReplicaCalcSuperfluousMetrics(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  4,
         expectedReplicas: 24,

@@ -436,7 +436,7 @@ func LegacyTestReplicaCalcSuperfluousMetrics(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcMissingMetrics(t *testing.T) {
+func TestLegacyReplicaCalcMissingMetrics(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  4,
         expectedReplicas: 3,

@@ -453,7 +453,7 @@ func LegacyTestReplicaCalcMissingMetrics(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcEmptyMetrics(t *testing.T) {
+func TestLegacyReplicaCalcEmptyMetrics(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas: 4,
         expectedError:   fmt.Errorf("unable to get metrics for resource cpu: no metrics returned from heapster"),

@@ -468,7 +468,7 @@ func LegacyTestReplicaCalcEmptyMetrics(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcEmptyCPURequest(t *testing.T) {
+func TestLegacyReplicaCalcEmptyCPURequest(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas: 1,
         expectedError:   fmt.Errorf("missing request for"),

@@ -483,7 +483,7 @@ func LegacyTestReplicaCalcEmptyCPURequest(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcMissingMetricsNoChangeEq(t *testing.T) {
+func TestLegacyReplicaCalcMissingMetricsNoChangeEq(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  2,
         expectedReplicas: 2,

@@ -500,7 +500,7 @@ func LegacyTestReplicaCalcMissingMetricsNoChangeEq(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcMissingMetricsNoChangeGt(t *testing.T) {
+func TestLegacyReplicaCalcMissingMetricsNoChangeGt(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  2,
         expectedReplicas: 2,

@@ -517,7 +517,7 @@ func LegacyTestReplicaCalcMissingMetricsNoChangeGt(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcMissingMetricsNoChangeLt(t *testing.T) {
+func TestLegacyReplicaCalcMissingMetricsNoChangeLt(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  2,
         expectedReplicas: 2,

@@ -534,7 +534,7 @@ func LegacyTestReplicaCalcMissingMetricsNoChangeLt(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcMissingMetricsUnreadyNoChange(t *testing.T) {
+func TestLegacyReplicaCalcMissingMetricsUnreadyNoChange(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  3,
         expectedReplicas: 3,

@@ -552,7 +552,7 @@ func LegacyTestReplicaCalcMissingMetricsUnreadyNoChange(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) {
+func TestLegacyReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  3,
         expectedReplicas: 4,

@@ -570,7 +570,7 @@ func LegacyTestReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) {
     tc.runTest(t)
 }

-func LegacyTestReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) {
+func TestLegacyReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) {
     tc := legacyReplicaCalcTestCase{
         currentReplicas:  4,
         expectedReplicas: 3,

@@ -591,7 +591,7 @@ func LegacyTestReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) {
 // TestComputedToleranceAlgImplementation is a regression test which
 // back-calculates a minimal percentage for downscaling based on a small percentage
 // increase in pod utilization which is calibrated against the tolerance value.
-func LegacyTestReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {
+func TestLegacyReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {

     startPods := int32(10)
     // 150 mCPU per pod.
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/BUILD (generated, vendored) | 1
@@ -47,6 +47,7 @@ go_test(
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
         "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -23,6 +23,7 @@ import (

     autoscalingapi "k8s.io/api/autoscaling/v2beta1"
     "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/meta/testrestmapper"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"

@@ -153,14 +154,14 @@ func (tc *restClientTestCase) prepareTestClient(t *testing.T) (*metricsfake.Clie
         return true, &metrics, nil
     } else {
         name := getForAction.GetName()
-        mapper := legacyscheme.Registry.RESTMapper()
+        mapper := testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)
         assert.NotNil(t, tc.singleObject, "should have only requested a single-object metric when we asked for metrics for a single object")
         gk := schema.FromAPIVersionAndKind(tc.singleObject.APIVersion, tc.singleObject.Kind).GroupKind()
         mapping, err := mapper.RESTMapping(gk)
         if err != nil {
             return true, nil, fmt.Errorf("unable to get mapping for %s: %v", gk.String(), err)
         }
-        groupResource := schema.GroupResource{Group: mapping.GroupVersionKind.Group, Resource: mapping.Resource}
+        groupResource := mapping.Resource.GroupResource()

         assert.Equal(t, groupResource.String(), getForAction.GetResource().Resource, "should have requested metrics for the resource matching the GroupKind passed in")
         assert.Equal(t, tc.singleObject.Name, name, "should have requested metrics for the object matching the name passed in")
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/replica_calculator.go (generated, vendored) | 65
@@ -88,7 +88,11 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti

         if pod.Status.Phase != v1.PodRunning || !podutil.IsPodReady(&pod) {
             // save this pod name for later, but pretend it doesn't exist for now
-            unreadyPods.Insert(pod.Name)
+            if pod.Status.Phase != v1.PodFailed {
+                // Failed pods should not be counted as unready pods as they will
+                // not become running anymore.
+                unreadyPods.Insert(pod.Name)
+            }
             delete(metrics, pod.Name)
             continue
         }
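The comment in the hunk above carries the key reasoning: a failed pod will never become running again, so remembering it as "unready" (which elsewhere lets the calculator assume it might still consume resources) would be wrong. A distilled, illustrative sketch of the classification, using the same vendored helpers (not the vendored code itself):

    import (
        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/util/sets"
        podutil "k8s.io/kubernetes/pkg/api/v1/pod"
    )

    // classifyPod mirrors the loop body above: failed pods vanish from the
    // calculation entirely, while merely unready pods are remembered so the
    // calculator can later treat them conservatively.
    func classifyPod(pod *v1.Pod, unreadyPods sets.String, metrics map[string]int64) {
        if pod.Status.Phase != v1.PodRunning || !podutil.IsPodReady(pod) {
            if pod.Status.Phase != v1.PodFailed {
                // Unready but not failed: the pod may still become ready later.
                unreadyPods.Insert(pod.Name)
            }
            // Either way, drop its metric sample so it cannot skew the average.
            delete(metrics, pod.Name)
        }
    }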
@@ -272,7 +276,7 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet

 // GetObjectMetricReplicas calculates the desired replica count based on a target metric utilization (as a milli-value)
 // for the given object in the given namespace, and the current replica count.
-func (c *ReplicaCalculator) GetObjectMetricReplicas(currentReplicas int32, targetUtilization int64, metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
+func (c *ReplicaCalculator) GetObjectMetricReplicas(currentReplicas int32, targetUtilization int64, metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference, selector labels.Selector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
     utilization, timestamp, err = c.metricsClient.GetObjectMetric(metricName, namespace, objectRef)
     if err != nil {
         return 0, 0, time.Time{}, fmt.Errorf("unable to get metric %s: %v on %s %s/%s", metricName, objectRef.Kind, namespace, objectRef.Name, err)

@@ -283,48 +287,85 @@ func (c *ReplicaCalculator) GetObjectMetricReplicas(currentReplicas int32, targe
         // return the current replicas if the change would be too small
         return currentReplicas, utilization, timestamp, nil
     }
-    replicaCount = int32(math.Ceil(usageRatio * float64(currentReplicas)))
+
+    readyPodCount, err := c.getReadyPodsCount(namespace, selector)
+
+    if err != nil {
+        return 0, 0, time.Time{}, fmt.Errorf("unable to calculate ready pods: %s", err)
+    }
+
+    replicaCount = int32(math.Ceil(usageRatio * float64(readyPodCount)))
+
     return replicaCount, utilization, timestamp, nil
 }

+// @TODO(mattjmcnaughton) Many different functions in this module use variations
+// of this function. Make this function generic, so we don't repeat the same
+// logic in multiple places.
+func (c *ReplicaCalculator) getReadyPodsCount(namespace string, selector labels.Selector) (int64, error) {
+    podList, err := c.podsGetter.Pods(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
+    if err != nil {
+        return 0, fmt.Errorf("unable to get pods while calculating replica count: %v", err)
+    }
+
+    if len(podList.Items) == 0 {
+        return 0, fmt.Errorf("no pods returned by selector while calculating replica count")
+    }
+
+    readyPodCount := 0
+
+    for _, pod := range podList.Items {
+        if pod.Status.Phase == v1.PodRunning && podutil.IsPodReady(&pod) {
+            readyPodCount++
+        }
+    }
+
+    return int64(readyPodCount), nil
+}
+
 // GetExternalMetricReplicas calculates the desired replica count based on a
 // target metric value (as a milli-value) for the external metric in the given
 // namespace, and the current replica count.
-func (c *ReplicaCalculator) GetExternalMetricReplicas(currentReplicas int32, targetUtilization int64, metricName, namespace string, selector *metav1.LabelSelector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
-    labelSelector, err := metav1.LabelSelectorAsSelector(selector)
+func (c *ReplicaCalculator) GetExternalMetricReplicas(currentReplicas int32, targetUtilization int64, metricName, namespace string, metricSelector *metav1.LabelSelector, podSelector labels.Selector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
+    metricLabelSelector, err := metav1.LabelSelectorAsSelector(metricSelector)
     if err != nil {
         return 0, 0, time.Time{}, err
     }
-    metrics, timestamp, err := c.metricsClient.GetExternalMetric(metricName, namespace, labelSelector)
+    metrics, timestamp, err := c.metricsClient.GetExternalMetric(metricName, namespace, metricLabelSelector)
     if err != nil {
-        return 0, 0, time.Time{}, fmt.Errorf("unable to get external metric %s/%s/%+v: %s", namespace, metricName, selector, err)
+        return 0, 0, time.Time{}, fmt.Errorf("unable to get external metric %s/%s/%+v: %s", namespace, metricName, metricSelector, err)
     }
     utilization = 0
     for _, val := range metrics {
         utilization = utilization + val
     }

+    readyPodCount, err := c.getReadyPodsCount(namespace, podSelector)
+
+    if err != nil {
+        return 0, 0, time.Time{}, fmt.Errorf("unable to calculate ready pods: %s", err)
+    }
+
     usageRatio := float64(utilization) / float64(targetUtilization)
     if math.Abs(1.0-usageRatio) <= c.tolerance {
         // return the current replicas if the change would be too small
         return currentReplicas, utilization, timestamp, nil
     }

-    return int32(math.Ceil(usageRatio * float64(currentReplicas))), utilization, timestamp, nil
+    return int32(math.Ceil(usageRatio * float64(readyPodCount))), utilization, timestamp, nil
 }

 // GetExternalPerPodMetricReplicas calculates the desired replica count based on a
 // target metric value per pod (as a milli-value) for the external metric in the
 // given namespace, and the current replica count.
-func (c *ReplicaCalculator) GetExternalPerPodMetricReplicas(currentReplicas int32, targetUtilizationPerPod int64, metricName, namespace string, selector *metav1.LabelSelector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
-    labelSelector, err := metav1.LabelSelectorAsSelector(selector)
+func (c *ReplicaCalculator) GetExternalPerPodMetricReplicas(currentReplicas int32, targetUtilizationPerPod int64, metricName, namespace string, metricSelector *metav1.LabelSelector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
+    metricLabelSelector, err := metav1.LabelSelectorAsSelector(metricSelector)
     if err != nil {
         return 0, 0, time.Time{}, err
     }
-    metrics, timestamp, err := c.metricsClient.GetExternalMetric(metricName, namespace, labelSelector)
+    metrics, timestamp, err := c.metricsClient.GetExternalMetric(metricName, namespace, metricLabelSelector)
     if err != nil {
-        return 0, 0, time.Time{}, fmt.Errorf("unable to get external metric %s/%s/%+v: %s", namespace, metricName, selector, err)
+        return 0, 0, time.Time{}, fmt.Errorf("unable to get external metric %s/%s/%+v: %s", namespace, metricName, metricSelector, err)
     }
     utilization = 0
     for _, val := range metrics {
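The net behavioral change in this file: once the usage ratio is outside the tolerance band, the desired count is now ceil(usageRatio x readyPodCount) instead of ceil(usageRatio x currentReplicas), so unready and failed pods no longer inflate the result. A worked sketch (illustrative only) using the values from TestReplicaCalcScaleUpCMExternalIgnoresUnreadyPods later in this diff, where one of three pods is ready and the metric reports 8600 against a target of 4400:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        utilization, target := 8600.0, 4400.0
        readyPods := 1.0       // only one of the three pods is ready
        currentReplicas := 3.0

        usageRatio := utilization / target                          // ~1.95
        fmt.Println(int32(math.Ceil(usageRatio * readyPods)))       // new code: 2
        fmt.Println(int32(math.Ceil(usageRatio * currentReplicas))) // old code: 6
    }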
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/replica_calculator_test.go (generated, vendored) | 99
@@ -24,6 +24,7 @@ import (

     autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
     "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/meta/testrestmapper"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"

@@ -77,6 +78,7 @@ type replicaCalcTestCase struct {
     metric *metricInfo

     podReadiness []v1.ConditionStatus
+    podPhase     []v1.PodPhase
 }

 const (

@@ -90,15 +92,24 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,
     fakeClient := &fake.Clientset{}
     fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
         obj := &v1.PodList{}
-        for i := 0; i < int(tc.currentReplicas); i++ {
+        podsCount := int(tc.currentReplicas)
+        // Failed pods are not included in tc.currentReplicas
+        if tc.podPhase != nil && len(tc.podPhase) > podsCount {
+            podsCount = len(tc.podPhase)
+        }
+        for i := 0; i < podsCount; i++ {
             podReadiness := v1.ConditionTrue
-            if tc.podReadiness != nil {
+            if tc.podReadiness != nil && i < len(tc.podReadiness) {
                 podReadiness = tc.podReadiness[i]
             }
+            podPhase := v1.PodRunning
+            if tc.podPhase != nil {
+                podPhase = tc.podPhase[i]
+            }
             podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
             pod := v1.Pod{
                 Status: v1.PodStatus{
-                    Phase: v1.PodRunning,
+                    Phase: podPhase,
                     Conditions: []v1.PodCondition{
                         {
                             Type: v1.PodReady,

@@ -211,7 +222,7 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,
             return true, &metrics, nil
         }
         name := getForAction.GetName()
-        mapper := legacyscheme.Registry.RESTMapper()
+        mapper := testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)
         metrics := &cmapi.MetricValueList{}
         assert.NotNil(t, tc.metric.singleObject, "should have only requested a single-object metric when calling GetObjectMetricReplicas")
         gk := schema.FromAPIVersionAndKind(tc.metric.singleObject.APIVersion, tc.metric.singleObject.Kind).GroupKind()

@@ -219,7 +230,7 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,
         if err != nil {
             return true, nil, fmt.Errorf("unable to get mapping for %s: %v", gk.String(), err)
         }
-        groupResource := schema.GroupResource{Group: mapping.GroupVersionKind.Group, Resource: mapping.Resource}
+        groupResource := mapping.Resource.GroupResource()

         assert.Equal(t, groupResource.String(), getForAction.GetResource().Resource, "should have requested metrics for the resource matching the GroupKind passed in")
         assert.Equal(t, tc.metric.singleObject.Name, name, "should have requested metrics for the object matching the name passed in")

@@ -313,10 +324,10 @@ func (tc *replicaCalcTestCase) runTest(t *testing.T) {
     var outTimestamp time.Time
     var err error
     if tc.metric.singleObject != nil {
-        outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetObjectMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, tc.metric.singleObject)
+        outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetObjectMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, tc.metric.singleObject, selector)
     } else if tc.metric.selector != nil {
         if tc.metric.targetUtilization > 0 {
-            outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetExternalMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, tc.metric.selector)
+            outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetExternalMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, tc.metric.selector, selector)
         } else if tc.metric.perPodTargetUtilization > 0 {
             outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetExternalPerPodMetricReplicas(tc.currentReplicas, tc.metric.perPodTargetUtilization, tc.metric.name, testNamespace, tc.metric.selector)
         }

@@ -405,6 +416,25 @@ func TestReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
     tc.runTest(t)
 }

+func TestReplicaCalcScaleUpIgnoresFailedPods(t *testing.T) {
+    tc := replicaCalcTestCase{
+        currentReplicas:  2,
+        expectedReplicas: 4,
+        podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
+        podPhase:         []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
+        resource: &resourceInfo{
+            name:     v1.ResourceCPU,
+            requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+            levels:   []int64{500, 700},
+
+            targetUtilization:   30,
+            expectedUtilization: 60,
+            expectedValue:       numContainersPerPod * 600,
+        },
+    }
+    tc.runTest(t)
+}
+
 func TestReplicaCalcScaleUpCM(t *testing.T) {
     tc := replicaCalcTestCase{
         currentReplicas: 3,

@@ -468,6 +498,26 @@ func TestReplicaCalcScaleUpCMObject(t *testing.T) {
     tc.runTest(t)
 }

+func TestReplicaCalcScaleUpCMObjectIgnoresUnreadyPods(t *testing.T) {
+    tc := replicaCalcTestCase{
+        currentReplicas:  3,
+        expectedReplicas: 5, // If we did not ignore unready pods, we'd expect 15 replicas.
+        podReadiness:     []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
+        metric: &metricInfo{
+            name:                "qps",
+            levels:              []int64{50000},
+            targetUtilization:   10000,
+            expectedUtilization: 50000,
+            singleObject: &autoscalingv2.CrossVersionObjectReference{
+                Kind:       "Deployment",
+                APIVersion: "extensions/v1beta1",
+                Name:       "some-deployment",
+            },
+        },
+    }
+    tc.runTest(t)
+}
+
 func TestReplicaCalcScaleUpCMExternal(t *testing.T) {
     tc := replicaCalcTestCase{
         currentReplicas: 1,

@@ -483,6 +533,22 @@ func TestReplicaCalcScaleUpCMExternal(t *testing.T) {
     tc.runTest(t)
 }

+func TestReplicaCalcScaleUpCMExternalIgnoresUnreadyPods(t *testing.T) {
+    tc := replicaCalcTestCase{
+        currentReplicas:  3,
+        expectedReplicas: 2, // Would expect 6 if we didn't ignore unready pods
+        podReadiness:     []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
+        metric: &metricInfo{
+            name:                "qps",
+            levels:              []int64{8600},
+            targetUtilization:   4400,
+            expectedUtilization: 8600,
+            selector:            &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
+        },
+    }
+    tc.runTest(t)
+}
+
 func TestReplicaCalcScaleUpCMExternalNoLabels(t *testing.T) {
     tc := replicaCalcTestCase{
         currentReplicas: 1,
@@ -610,6 +676,25 @@ func TestReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) {
     tc.runTest(t)
 }

+func TestReplicaCalcScaleDownIgnoresFailedPods(t *testing.T) {
+    tc := replicaCalcTestCase{
+        currentReplicas:  5,
+        expectedReplicas: 3,
+        podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
+        podPhase:         []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
+        resource: &resourceInfo{
+            name:     v1.ResourceCPU,
+            requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+            levels:   []int64{100, 300, 500, 250, 250},
+
+            targetUtilization:   50,
+            expectedUtilization: 28,
+            expectedValue:       numContainersPerPod * 280,
+        },
+    }
+    tc.runTest(t)
+}
+
 func TestReplicaCalcTolerance(t *testing.T) {
     tc := replicaCalcTestCase{
         currentReplicas: 3,
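Worked numbers for TestReplicaCalcScaleDownIgnoresFailedPods in the hunk above (illustrative only): the two failed pods are excluded, and the five running, ready pods report 100 + 300 + 500 + 250 + 250 = 1400m against 5 x 1000m requested, so utilization is 28% versus the 50% target; usageRatio = 28/50 = 0.56 and ceil(0.56 x 5 ready pods) = 3, matching the expected replica count.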