Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions


@ -1,10 +1,4 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
@ -15,35 +9,38 @@ go_library(
"replica_calculator.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/podautoscaler",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/podautoscaler/metrics:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/autoscaling/v1:go_default_library",
"//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/autoscaling/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/autoscaling/v1:go_default_library",
"//vendor/k8s.io/client-go/scale:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v2beta2:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -64,31 +61,33 @@ go_test(
"//pkg/apis/extensions/install:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/podautoscaler/metrics:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v2beta2:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/scale/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta2:go_default_library",
"//staging/src/k8s.io/metrics/pkg/apis/external_metrics/v1beta1:go_default_library",
"//staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library",
"//staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/fake:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/custom_metrics/fake:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/external_metrics/fake:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/github.com/stretchr/testify/require:go_default_library",
"//vendor/k8s.io/api/autoscaling/v1:go_default_library",
"//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/scale/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/heapster/metrics/api/v1/types:go_default_library",
"//vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1:go_default_library",
"//vendor/k8s.io/metrics/pkg/apis/external_metrics/v1beta1:go_default_library",
"//vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library",
"//vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/custom_metrics/fake:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/external_metrics/fake:go_default_library",
],
)
@ -106,4 +105,5 @@ filegroup(
"//pkg/controller/podautoscaler/metrics:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -11,3 +11,5 @@ approvers:
- piosz
- jszczepkowski
- MaciekPytel
labels:
- sig/autoscaling


@ -21,9 +21,8 @@ import (
"math"
"time"
"github.com/golang/glog"
autoscalingv1 "k8s.io/api/autoscaling/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
"k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
@ -36,16 +35,20 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
autoscalinginformers "k8s.io/client-go/informers/autoscaling/v1"
coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes/scheme"
autoscalingclient "k8s.io/client-go/kubernetes/typed/autoscaling/v1"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
autoscalinglisters "k8s.io/client-go/listers/autoscaling/v1"
corelisters "k8s.io/client-go/listers/core/v1"
scaleclient "k8s.io/client-go/scale"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/controller"
metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
)
var (
@ -53,6 +56,11 @@ var (
scaleUpLimitMinimum = 4.0
)
type timestampedRecommendation struct {
recommendation int32
timestamp time.Time
}
// HorizontalController is responsible for synchronizing the HPA objects stored
// in the system with the actual deployments/replication controllers they
// control.
@ -64,16 +72,23 @@ type HorizontalController struct {
replicaCalc *ReplicaCalculator
eventRecorder record.EventRecorder
upscaleForbiddenWindow time.Duration
downscaleForbiddenWindow time.Duration
downscaleStabilisationWindow time.Duration
// hpaLister is able to list/get HPAs from the shared cache from the informer passed in to
// NewHorizontalController.
hpaLister autoscalinglisters.HorizontalPodAutoscalerLister
hpaListerSynced cache.InformerSynced
// podLister is able to list/get Pods from the shared cache from the informer passed in to
// NewHorizontalController.
podLister corelisters.PodLister
podListerSynced cache.InformerSynced
// Controllers that need to be synced
queue workqueue.RateLimitingInterface
// Latest unstabilized recommendations for each autoscaler.
recommendations map[string][]timestampedRecommendation
}
// NewHorizontalController creates a new HorizontalController.
@ -82,27 +97,29 @@ func NewHorizontalController(
scaleNamespacer scaleclient.ScalesGetter,
hpaNamespacer autoscalingclient.HorizontalPodAutoscalersGetter,
mapper apimeta.RESTMapper,
replicaCalc *ReplicaCalculator,
metricsClient metricsclient.MetricsClient,
hpaInformer autoscalinginformers.HorizontalPodAutoscalerInformer,
podInformer coreinformers.PodInformer,
resyncPeriod time.Duration,
upscaleForbiddenWindow time.Duration,
downscaleForbiddenWindow time.Duration,
downscaleStabilisationWindow time.Duration,
tolerance float64,
cpuInitializationPeriod,
delayOfInitialReadinessStatus time.Duration,
) *HorizontalController {
broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(glog.Infof)
broadcaster.StartLogging(klog.Infof)
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: evtNamespacer.Events("")})
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "horizontal-pod-autoscaler"})
hpaController := &HorizontalController{
replicaCalc: replicaCalc,
eventRecorder: recorder,
scaleNamespacer: scaleNamespacer,
hpaNamespacer: hpaNamespacer,
upscaleForbiddenWindow: upscaleForbiddenWindow,
downscaleForbiddenWindow: downscaleForbiddenWindow,
queue: workqueue.NewNamedRateLimitingQueue(NewDefaultHPARateLimiter(resyncPeriod), "horizontalpodautoscaler"),
mapper: mapper,
eventRecorder: recorder,
scaleNamespacer: scaleNamespacer,
hpaNamespacer: hpaNamespacer,
downscaleStabilisationWindow: downscaleStabilisationWindow,
queue: workqueue.NewNamedRateLimitingQueue(NewDefaultHPARateLimiter(resyncPeriod), "horizontalpodautoscaler"),
mapper: mapper,
recommendations: map[string][]timestampedRecommendation{},
}
hpaInformer.Informer().AddEventHandlerWithResyncPeriod(
@ -116,6 +133,18 @@ func NewHorizontalController(
hpaController.hpaLister = hpaInformer.Lister()
hpaController.hpaListerSynced = hpaInformer.Informer().HasSynced
hpaController.podLister = podInformer.Lister()
hpaController.podListerSynced = podInformer.Informer().HasSynced
replicaCalc := NewReplicaCalculator(
metricsClient,
hpaController.podLister,
tolerance,
cpuInitializationPeriod,
delayOfInitialReadinessStatus,
)
hpaController.replicaCalc = replicaCalc
return hpaController
}
@ -124,10 +153,10 @@ func (a *HorizontalController) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer a.queue.ShutDown()
glog.Infof("Starting HPA controller")
defer glog.Infof("Shutting down HPA controller")
klog.Infof("Starting HPA controller")
defer klog.Infof("Shutting down HPA controller")
if !controller.WaitForCacheSync("HPA", stopCh, a.hpaListerSynced) {
if !controller.WaitForCacheSync("HPA", stopCh, a.hpaListerSynced, a.podListerSynced) {
return
}
@ -168,7 +197,7 @@ func (a *HorizontalController) deleteHPA(obj interface{}) {
func (a *HorizontalController) worker() {
for a.processNextWorkItem() {
}
glog.Infof("horizontal pod autoscaler controller worker shutting down")
klog.Infof("horizontal pod autoscaler controller worker shutting down")
}
func (a *HorizontalController) processNextWorkItem() bool {
@ -189,9 +218,9 @@ func (a *HorizontalController) processNextWorkItem() bool {
return true
}
// Computes the desired number of replicas for the metric specifications listed in the HPA, returning the maximum
// of the computed replica counts, a description of the associated metric, and the statuses of all metrics
// computed.
// computeReplicasForMetrics computes the desired number of replicas for the metric specifications listed in the HPA,
// returning the maximum of the computed replica counts, a description of the associated metric, and the statuses of
// all metrics computed.
func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.HorizontalPodAutoscaler, scale *autoscalingv1.Scale,
metricSpecs []autoscalingv2.MetricSpec) (replicas int32, metric string, statuses []autoscalingv2.MetricStatus, timestamp time.Time, err error) {
@ -216,126 +245,41 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
}
var replicaCountProposal int32
var utilizationProposal int64
var timestampProposal time.Time
var metricNameProposal string
switch metricSpec.Type {
case autoscalingv2.ObjectMetricSourceType:
replicaCountProposal, utilizationProposal, timestampProposal, err = a.replicaCalc.GetObjectMetricReplicas(currentReplicas, metricSpec.Object.TargetValue.MilliValue(), metricSpec.Object.MetricName, hpa.Namespace, &metricSpec.Object.Target, selector)
metricSelector, err := metav1.LabelSelectorAsSelector(metricSpec.Object.Metric.Selector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetObjectMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, "", nil, time.Time{}, fmt.Errorf("failed to get object metric value: %v", err)
}
metricNameProposal = fmt.Sprintf("%s metric %s", metricSpec.Object.Target.Kind, metricSpec.Object.MetricName)
statuses[i] = autoscalingv2.MetricStatus{
Type: autoscalingv2.ObjectMetricSourceType,
Object: &autoscalingv2.ObjectMetricStatus{
Target: metricSpec.Object.Target,
MetricName: metricSpec.Object.MetricName,
CurrentValue: *resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
},
replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForObjectMetric(currentReplicas, metricSpec, hpa, selector, &statuses[i], metricSelector)
if err != nil {
return 0, "", nil, time.Time{}, fmt.Errorf("failed to get object metric value: %v", err)
}
case autoscalingv2.PodsMetricSourceType:
replicaCountProposal, utilizationProposal, timestampProposal, err = a.replicaCalc.GetMetricReplicas(currentReplicas, metricSpec.Pods.TargetAverageValue.MilliValue(), metricSpec.Pods.MetricName, hpa.Namespace, selector)
metricSelector, err := metav1.LabelSelectorAsSelector(metricSpec.Pods.Metric.Selector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetPodsMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetPodsMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, "", nil, time.Time{}, fmt.Errorf("failed to get pods metric value: %v", err)
}
metricNameProposal = fmt.Sprintf("pods metric %s", metricSpec.Pods.MetricName)
statuses[i] = autoscalingv2.MetricStatus{
Type: autoscalingv2.PodsMetricSourceType,
Pods: &autoscalingv2.PodsMetricStatus{
MetricName: metricSpec.Pods.MetricName,
CurrentAverageValue: *resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
},
replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForPodsMetric(currentReplicas, metricSpec, hpa, selector, &statuses[i], metricSelector)
if err != nil {
return 0, "", nil, time.Time{}, fmt.Errorf("failed to get object metric value: %v", err)
}
case autoscalingv2.ResourceMetricSourceType:
if metricSpec.Resource.TargetAverageValue != nil {
var rawProposal int64
replicaCountProposal, rawProposal, timestampProposal, err = a.replicaCalc.GetRawResourceReplicas(currentReplicas, metricSpec.Resource.TargetAverageValue.MilliValue(), metricSpec.Resource.Name, hpa.Namespace, selector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetResourceMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetResourceMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, "", nil, time.Time{}, fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err)
}
metricNameProposal = fmt.Sprintf("%s resource", metricSpec.Resource.Name)
statuses[i] = autoscalingv2.MetricStatus{
Type: autoscalingv2.ResourceMetricSourceType,
Resource: &autoscalingv2.ResourceMetricStatus{
Name: metricSpec.Resource.Name,
CurrentAverageValue: *resource.NewMilliQuantity(rawProposal, resource.DecimalSI),
},
}
} else {
// set a default utilization percentage if none is set
if metricSpec.Resource.TargetAverageUtilization == nil {
errMsg := "invalid resource metric source: neither a utilization target nor a value target was set"
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetResourceMetric", errMsg)
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetResourceMetric", "the HPA was unable to compute the replica count: %s", errMsg)
return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
}
targetUtilization := *metricSpec.Resource.TargetAverageUtilization
var percentageProposal int32
var rawProposal int64
replicaCountProposal, percentageProposal, rawProposal, timestampProposal, err = a.replicaCalc.GetResourceReplicas(currentReplicas, targetUtilization, metricSpec.Resource.Name, hpa.Namespace, selector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetResourceMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetResourceMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, "", nil, time.Time{}, fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err)
}
metricNameProposal = fmt.Sprintf("%s resource utilization (percentage of request)", metricSpec.Resource.Name)
statuses[i] = autoscalingv2.MetricStatus{
Type: autoscalingv2.ResourceMetricSourceType,
Resource: &autoscalingv2.ResourceMetricStatus{
Name: metricSpec.Resource.Name,
CurrentAverageUtilization: &percentageProposal,
CurrentAverageValue: *resource.NewMilliQuantity(rawProposal, resource.DecimalSI),
},
}
replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForResourceMetric(currentReplicas, metricSpec, hpa, selector, &statuses[i])
if err != nil {
return 0, "", nil, time.Time{}, err
}
case autoscalingv2.ExternalMetricSourceType:
if metricSpec.External.TargetAverageValue != nil {
replicaCountProposal, utilizationProposal, timestampProposal, err = a.replicaCalc.GetExternalPerPodMetricReplicas(currentReplicas, metricSpec.External.TargetAverageValue.MilliValue(), metricSpec.External.MetricName, hpa.Namespace, metricSpec.External.MetricSelector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, "", nil, time.Time{}, fmt.Errorf("failed to get %s external metric: %v", metricSpec.External.MetricName, err)
}
metricNameProposal = fmt.Sprintf("external metric %s(%+v)", metricSpec.External.MetricName, metricSpec.External.MetricSelector)
statuses[i] = autoscalingv2.MetricStatus{
Type: autoscalingv2.ExternalMetricSourceType,
External: &autoscalingv2.ExternalMetricStatus{
MetricSelector: metricSpec.External.MetricSelector,
MetricName: metricSpec.External.MetricName,
CurrentAverageValue: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
},
}
} else if metricSpec.External.TargetValue != nil {
replicaCountProposal, utilizationProposal, timestampProposal, err = a.replicaCalc.GetExternalMetricReplicas(currentReplicas, metricSpec.External.TargetValue.MilliValue(), metricSpec.External.MetricName, hpa.Namespace, metricSpec.External.MetricSelector, selector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, "", nil, time.Time{}, fmt.Errorf("failed to get external metric %s: %v", metricSpec.External.MetricName, err)
}
metricNameProposal = fmt.Sprintf("external metric %s(%+v)", metricSpec.External.MetricName, metricSpec.External.MetricSelector)
statuses[i] = autoscalingv2.MetricStatus{
Type: autoscalingv2.ExternalMetricSourceType,
External: &autoscalingv2.ExternalMetricStatus{
MetricSelector: metricSpec.External.MetricSelector,
MetricName: metricSpec.External.MetricName,
CurrentValue: *resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
},
}
} else {
errMsg := "invalid external metric source: neither a value target nor an average value target was set"
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", errMsg)
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForExternalMetric(currentReplicas, metricSpec, hpa, selector, &statuses[i])
if err != nil {
return 0, "", nil, time.Time{}, err
}
default:
errMsg := fmt.Sprintf("unknown metric source type %q", string(metricSpec.Type))
@ -343,7 +287,6 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidMetricSourceType", "the HPA was unable to compute the replica count: %s", errMsg)
return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
}
if replicas == 0 || replicaCountProposal > replicas {
timestamp = timestampProposal
replicas = replicaCountProposal
@ -363,14 +306,171 @@ func (a *HorizontalController) reconcileKey(key string) error {
hpa, err := a.hpaLister.HorizontalPodAutoscalers(namespace).Get(name)
if errors.IsNotFound(err) {
glog.Infof("Horizontal Pod Autoscaler has been deleted %v", key)
klog.Infof("Horizontal Pod Autoscaler %s has been deleted in %s", name, namespace)
delete(a.recommendations, key)
return nil
}
return a.reconcileAutoscaler(hpa)
return a.reconcileAutoscaler(hpa, key)
}
func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.HorizontalPodAutoscaler) error {
// computeStatusForObjectMetric computes the desired number of replicas for the specified metric of type ObjectMetricSourceType.
func (a *HorizontalController) computeStatusForObjectMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (int32, time.Time, string, error) {
replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetObjectMetricReplicas(currentReplicas, metricSpec.Object.Target.Value.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, selector, metricSelector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetObjectMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, timestampProposal, "", err
}
*status = autoscalingv2.MetricStatus{
Type: autoscalingv2.ObjectMetricSourceType,
Object: &autoscalingv2.ObjectMetricStatus{
DescribedObject: metricSpec.Object.DescribedObject,
Metric: autoscalingv2.MetricIdentifier{
Name: metricSpec.Object.Metric.Name,
Selector: metricSpec.Object.Metric.Selector,
},
Current: autoscalingv2.MetricValueStatus{
Value: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
},
},
}
return replicaCountProposal, timestampProposal, fmt.Sprintf("%s metric %s", metricSpec.Object.DescribedObject.Kind, metricSpec.Object.Metric.Name), nil
}
// computeStatusForPodsMetric computes the desired number of replicas for the specified metric of type PodsMetricSourceType.
func (a *HorizontalController) computeStatusForPodsMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (int32, time.Time, string, error) {
replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetMetricReplicas(currentReplicas, metricSpec.Pods.Target.AverageValue.MilliValue(), metricSpec.Pods.Metric.Name, hpa.Namespace, selector, metricSelector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetPodsMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetPodsMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, timestampProposal, "", err
}
*status = autoscalingv2.MetricStatus{
Type: autoscalingv2.PodsMetricSourceType,
Pods: &autoscalingv2.PodsMetricStatus{
Metric: autoscalingv2.MetricIdentifier{
Name: metricSpec.Pods.Metric.Name,
Selector: metricSpec.Pods.Metric.Selector,
},
Current: autoscalingv2.MetricValueStatus{
AverageValue: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
},
},
}
return replicaCountProposal, timestampProposal, fmt.Sprintf("pods metric %s", metricSpec.Pods.Metric.Name), nil
}
// computeStatusForResourceMetric computes the desired number of replicas for the specified metric of type ResourceMetricSourceType.
func (a *HorizontalController) computeStatusForResourceMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus) (int32, time.Time, string, error) {
if metricSpec.Resource.Target.AverageValue != nil {
var rawProposal int64
replicaCountProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetRawResourceReplicas(currentReplicas, metricSpec.Resource.Target.AverageValue.MilliValue(), metricSpec.Resource.Name, hpa.Namespace, selector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetResourceMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetResourceMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, time.Time{}, "", fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err)
}
metricNameProposal := fmt.Sprintf("%s resource", metricSpec.Resource.Name)
status = &autoscalingv2.MetricStatus{
Type: autoscalingv2.ResourceMetricSourceType,
Resource: &autoscalingv2.ResourceMetricStatus{
Name: metricSpec.Resource.Name,
Current: autoscalingv2.MetricValueStatus{
AverageValue: resource.NewMilliQuantity(rawProposal, resource.DecimalSI),
},
},
}
return replicaCountProposal, timestampProposal, metricNameProposal, nil
} else {
if metricSpec.Resource.Target.AverageUtilization == nil {
errMsg := "invalid resource metric source: neither a utilization target nor a value target was set"
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetResourceMetric", errMsg)
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetResourceMetric", "the HPA was unable to compute the replica count: %s", errMsg)
return 0, time.Time{}, "", fmt.Errorf(errMsg)
}
targetUtilization := *metricSpec.Resource.Target.AverageUtilization
var percentageProposal int32
var rawProposal int64
replicaCountProposal, percentageProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetResourceReplicas(currentReplicas, targetUtilization, metricSpec.Resource.Name, hpa.Namespace, selector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetResourceMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetResourceMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, time.Time{}, "", fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err)
}
metricNameProposal := fmt.Sprintf("%s resource utilization (percentage of request)", metricSpec.Resource.Name)
*status = autoscalingv2.MetricStatus{
Type: autoscalingv2.ResourceMetricSourceType,
Resource: &autoscalingv2.ResourceMetricStatus{
Name: metricSpec.Resource.Name,
Current: autoscalingv2.MetricValueStatus{
AverageUtilization: &percentageProposal,
AverageValue: resource.NewMilliQuantity(rawProposal, resource.DecimalSI),
},
},
}
return replicaCountProposal, timestampProposal, metricNameProposal, nil
}
}
// computeStatusForExternalMetric computes the desired number of replicas for the specified metric of type ExternalMetricSourceType.
func (a *HorizontalController) computeStatusForExternalMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus) (int32, time.Time, string, error) {
if metricSpec.External.Target.AverageValue != nil {
replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetExternalPerPodMetricReplicas(currentReplicas, metricSpec.External.Target.AverageValue.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, time.Time{}, "", fmt.Errorf("failed to get %s external metric: %v", metricSpec.External.Metric.Name, err)
}
*status = autoscalingv2.MetricStatus{
Type: autoscalingv2.ExternalMetricSourceType,
External: &autoscalingv2.ExternalMetricStatus{
Metric: autoscalingv2.MetricIdentifier{
Name: metricSpec.External.Metric.Name,
Selector: metricSpec.External.Metric.Selector,
},
Current: autoscalingv2.MetricValueStatus{
AverageValue: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
},
},
}
return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), nil
}
if metricSpec.External.Target.Value != nil {
replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetExternalMetricReplicas(currentReplicas, metricSpec.External.Target.Value.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector, selector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, time.Time{}, "", fmt.Errorf("failed to get external metric %s: %v", metricSpec.External.Metric.Name, err)
}
*status = autoscalingv2.MetricStatus{
Type: autoscalingv2.ExternalMetricSourceType,
External: &autoscalingv2.ExternalMetricStatus{
Metric: autoscalingv2.MetricIdentifier{
Name: metricSpec.External.Metric.Name,
Selector: metricSpec.External.Metric.Selector,
},
Current: autoscalingv2.MetricValueStatus{
Value: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
},
},
}
return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), nil
}
errMsg := "invalid external metric source: neither a value target nor an average value target was set"
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", errMsg)
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %s", errMsg)
return 0, time.Time{}, "", fmt.Errorf(errMsg)
}
func (a *HorizontalController) recordInitialRecommendation(currentReplicas int32, key string) {
if a.recommendations[key] == nil {
a.recommendations[key] = []timestampedRecommendation{{currentReplicas, time.Now()}}
}
}
func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.HorizontalPodAutoscaler, key string) error {
// make a copy so that we never mutate the shared informer cache (conversion can mutate the object)
hpav1 := hpav1Shared.DeepCopy()
// then, convert to autoscaling/v2, which makes our lives easier when calculating metrics
@ -414,6 +514,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
}
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededGetScale", "the HPA controller was able to get the target's current scale")
currentReplicas := scale.Status.Replicas
a.recordInitialRecommendation(currentReplicas, key)
var metricStatuses []autoscalingv2.MetricStatus
metricDesiredReplicas := int32(0)
@ -441,6 +542,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
rescaleReason = "Current number of replicas must be greater than 0"
desiredReplicas = 1
} else {
metricDesiredReplicas, metricName, metricStatuses, metricTimestamp, err = a.computeReplicasForMetrics(hpa, scale, hpa.Spec.Metrics)
if err != nil {
a.setCurrentReplicasInStatus(hpa, currentReplicas)
@ -451,7 +553,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
return fmt.Errorf("failed to compute desired number of replicas based on listed metrics for %s: %v", reference, err)
}
glog.V(4).Infof("proposing %v desired replicas (based on %s from %s) for %s", metricDesiredReplicas, metricName, timestamp, reference)
klog.V(4).Infof("proposing %v desired replicas (based on %s from %s) for %s", metricDesiredReplicas, metricName, timestamp, reference)
rescaleMetric := ""
if metricDesiredReplicas > desiredReplicas {
@ -465,33 +567,8 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
if desiredReplicas < currentReplicas {
rescaleReason = "All metrics below target"
}
desiredReplicas = a.normalizeDesiredReplicas(hpa, currentReplicas, desiredReplicas)
rescale = a.shouldScale(hpa, currentReplicas, desiredReplicas, timestamp)
backoffDown := false
backoffUp := false
if hpa.Status.LastScaleTime != nil {
if !hpa.Status.LastScaleTime.Add(a.downscaleForbiddenWindow).Before(timestamp) {
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "BackoffDownscale", "the time since the previous scale is still within the downscale forbidden window")
backoffDown = true
}
if !hpa.Status.LastScaleTime.Add(a.upscaleForbiddenWindow).Before(timestamp) {
backoffUp = true
if backoffDown {
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "BackoffBoth", "the time since the previous scale is still within both the downscale and upscale forbidden windows")
} else {
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "BackoffUpscale", "the time since the previous scale is still within the upscale forbidden window")
}
}
}
if !backoffDown && !backoffUp {
// mark that we're not backing off
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "the last scale time was sufficiently old as to warrant a new scale")
}
desiredReplicas = a.normalizeDesiredReplicas(hpa, key, currentReplicas, desiredReplicas)
rescale = desiredReplicas != currentReplicas
}
if rescale {
@ -508,10 +585,10 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
}
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededRescale", "the HPA controller was able to update the target scale to %d", desiredReplicas)
a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "SuccessfulRescale", "New size: %d; reason: %s", desiredReplicas, rescaleReason)
glog.Infof("Successful rescale of %s, old size: %d, new size: %d, reason: %s",
klog.Infof("Successful rescale of %s, old size: %d, new size: %d, reason: %s",
hpa.Name, currentReplicas, desiredReplicas, rescaleReason)
} else {
glog.V(4).Infof("decided not to scale %s to %v (last scale time was %s)", reference, desiredReplicas, hpa.Status.LastScaleTime)
klog.V(4).Infof("decided not to scale %s to %v (last scale time was %s)", reference, desiredReplicas, hpa.Status.LastScaleTime)
desiredReplicas = currentReplicas
}
@ -519,9 +596,39 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
return a.updateStatusIfNeeded(hpaStatusOriginal, hpa)
}
// stabilizeRecommendation:
// - replaces old recommendation with the newest recommendation,
// - returns max of recommendations that are not older than downscaleStabilisationWindow.
func (a *HorizontalController) stabilizeRecommendation(key string, prenormalizedDesiredReplicas int32) int32 {
maxRecommendation := prenormalizedDesiredReplicas
foundOldSample := false
oldSampleIndex := 0
cutoff := time.Now().Add(-a.downscaleStabilisationWindow)
for i, rec := range a.recommendations[key] {
if rec.timestamp.Before(cutoff) {
foundOldSample = true
oldSampleIndex = i
} else if rec.recommendation > maxRecommendation {
maxRecommendation = rec.recommendation
}
}
if foundOldSample {
a.recommendations[key][oldSampleIndex] = timestampedRecommendation{prenormalizedDesiredReplicas, time.Now()}
} else {
a.recommendations[key] = append(a.recommendations[key], timestampedRecommendation{prenormalizedDesiredReplicas, time.Now()})
}
return maxRecommendation
}
// normalizeDesiredReplicas takes the metrics desired replicas value and normalizes it based on the appropriate conditions (i.e. < maxReplicas, >
// minReplicas, etc...)
func (a *HorizontalController) normalizeDesiredReplicas(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas int32, prenormalizedDesiredReplicas int32) int32 {
func (a *HorizontalController) normalizeDesiredReplicas(hpa *autoscalingv2.HorizontalPodAutoscaler, key string, currentReplicas int32, prenormalizedDesiredReplicas int32) int32 {
stabilizedRecommendation := a.stabilizeRecommendation(key, prenormalizedDesiredReplicas)
if stabilizedRecommendation != prenormalizedDesiredReplicas {
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ScaleDownStabilized", "recent recommendations were higher than current one, applying the highest recent recommendation")
} else {
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
}
var minReplicas int32
if hpa.Spec.MinReplicas != nil {
minReplicas = *hpa.Spec.MinReplicas
@ -529,9 +636,9 @@ func (a *HorizontalController) normalizeDesiredReplicas(hpa *autoscalingv2.Horiz
minReplicas = 0
}
desiredReplicas, condition, reason := convertDesiredReplicasWithRules(currentReplicas, prenormalizedDesiredReplicas, minReplicas, hpa.Spec.MaxReplicas)
desiredReplicas, condition, reason := convertDesiredReplicasWithRules(currentReplicas, stabilizedRecommendation, minReplicas, hpa.Spec.MaxReplicas)
if desiredReplicas == prenormalizedDesiredReplicas {
if desiredReplicas == stabilizedRecommendation {
setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, condition, reason)
} else {
setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, condition, reason)
@ -588,30 +695,6 @@ func calculateScaleUpLimit(currentReplicas int32) int32 {
return int32(math.Max(scaleUpLimitFactor*float64(currentReplicas), scaleUpLimitMinimum))
}
func (a *HorizontalController) shouldScale(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, timestamp time.Time) bool {
if desiredReplicas == currentReplicas {
return false
}
if hpa.Status.LastScaleTime == nil {
return true
}
// Going down only if the usageRatio dropped significantly below the target
// and there was no rescaling in the last downscaleForbiddenWindow.
if desiredReplicas < currentReplicas && hpa.Status.LastScaleTime.Add(a.downscaleForbiddenWindow).Before(timestamp) {
return true
}
// Going up only if the usage ratio increased significantly above the target
// and there was no rescaling in the last upscaleForbiddenWindow.
if desiredReplicas > currentReplicas && hpa.Status.LastScaleTime.Add(a.upscaleForbiddenWindow).Before(timestamp) {
return true
}
return false
}
// scaleForResourceMappings attempts to fetch the scale for the
// resource with the given name and namespace, trying each RESTMapping
// in turn until a working one is found. If none work, the first error
@ -687,7 +770,7 @@ func (a *HorizontalController) updateStatus(hpa *autoscalingv2.HorizontalPodAuto
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedUpdateStatus", err.Error())
return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
}
glog.V(2).Infof("Successfully updated status for %s", hpa.Name)
klog.V(2).Infof("Successfully updated status for %s", hpa.Name)
return nil
}
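
The stabilisation logic introduced above replaces the fixed upscale/downscale forbidden windows: recent recommendations are kept per HPA and the controller scales to the maximum recommendation seen inside downscaleStabilisationWindow. Below is a minimal, standalone sketch of that idea under the same max-over-window rule as stabilizeRecommendation; the names and helper are illustrative only, not the controller's API.

package main

import (
	"fmt"
	"time"
)

type timestampedRecommendation struct {
	recommendation int32
	timestamp      time.Time
}

// stabilize returns the larger of the new proposal and every recommendation
// still inside the window, mirroring the max-over-window rule in the diff.
func stabilize(history []timestampedRecommendation, proposal int32, window time.Duration, now time.Time) int32 {
	stabilized := proposal
	cutoff := now.Add(-window)
	for _, rec := range history {
		if rec.timestamp.After(cutoff) && rec.recommendation > stabilized {
			stabilized = rec.recommendation
		}
	}
	return stabilized
}

func main() {
	now := time.Now()
	history := []timestampedRecommendation{
		{recommendation: 7, timestamp: now.Add(-2 * time.Minute)},  // recent spike, still inside the window
		{recommendation: 4, timestamp: now.Add(-10 * time.Minute)}, // older than the 5m window, ignored
	}
	// A new proposal of 3 replicas is stabilised up to 7, so a short dip in
	// load does not trigger an immediate scale-down.
	fmt.Println(stabilize(history, 3, 5*time.Minute, now)) // 7
}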

File diff suppressed because it is too large.


@ -98,7 +98,8 @@ type legacyTestCase struct {
resource *fakeResource
// Last scale time
lastScaleTime *metav1.Time
lastScaleTime *metav1.Time
recommendations []timestampedRecommendation
}
// Needs to be called under a lock.
@ -178,7 +179,7 @@ func (tc *legacyTestCase) prepareTestClient(t *testing.T) (*fake.Clientset, *sca
{
Type: autoscalingv2.ResourceMetricSourceType,
Resource: &autoscalingv2.ResourceMetricSource{
Name: v1.ResourceCPU,
Name: v1.ResourceCPU,
TargetAverageUtilization: &tc.CPUTarget,
},
},
@ -222,7 +223,8 @@ func (tc *legacyTestCase) prepareTestClient(t *testing.T) (*fake.Clientset, *sca
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
pod := v1.Pod{
Status: v1.PodStatus{
Phase: v1.PodRunning,
StartTime: &metav1.Time{Time: time.Now().Add(-3 * time.Minute)},
Phase: v1.PodRunning,
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
@ -484,29 +486,29 @@ func (tc *legacyTestCase) runTest(t *testing.T) {
return true, obj, nil
})
replicaCalc := &ReplicaCalculator{
metricsClient: metricsClient,
podsGetter: testClient.Core(),
tolerance: defaultTestingTolerance,
}
informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
defaultUpscaleForbiddenWindow := 3 * time.Minute
defaultDownscaleForbiddenWindow := 5 * time.Minute
defaultDownscaleStabilisationWindow := 5 * time.Minute
hpaController := NewHorizontalController(
eventClient.Core(),
testScaleClient,
testClient.Autoscaling(),
testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme),
replicaCalc,
metricsClient,
informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
informerFactory.Core().V1().Pods(),
controller.NoResyncPeriodFunc(),
defaultUpscaleForbiddenWindow,
defaultDownscaleForbiddenWindow,
defaultDownscaleStabilisationWindow,
defaultTestingTolerance,
defaultTestingCpuInitializationPeriod,
defaultTestingDelayOfInitialReadinessStatus,
)
hpaController.hpaListerSynced = alwaysReady
if tc.recommendations != nil {
hpaController.recommendations["test-namespace/test-hpa"] = tc.recommendations
}
stop := make(chan struct{})
defer close(stop)
informerFactory.Start(stop)
@ -547,8 +549,7 @@ func TestLegacyScaleUpUnreadyLessScale(t *testing.T) {
initialReplicas: 3,
desiredReplicas: 4,
CPUTarget: 30,
CPUCurrent: 60,
verifyCPUCurrent: true,
verifyCPUCurrent: false,
reportedLevels: []uint64{300, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
@ -636,12 +637,12 @@ func TestLegacyScaleUpCM(t *testing.T) {
tc.runTest(t)
}
func TestLegacyScaleUpCMUnreadyLessScale(t *testing.T) {
func TestLegacyScaleUpCMUnreadyNoLessScale(t *testing.T) {
tc := legacyTestCase{
minReplicas: 2,
maxReplicas: 6,
initialReplicas: 3,
desiredReplicas: 4,
desiredReplicas: 6,
CPUTarget: 0,
metricsTarget: []autoscalingv2.MetricSpec{
{
@ -664,7 +665,7 @@ func TestLegacyScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
minReplicas: 2,
maxReplicas: 6,
initialReplicas: 3,
desiredReplicas: 3,
desiredReplicas: 6,
CPUTarget: 0,
metricsTarget: []autoscalingv2.MetricSpec{
{
@ -693,6 +694,7 @@ func TestLegacyScaleDown(t *testing.T) {
reportedLevels: []uint64{100, 300, 500, 250, 250},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsAPI: true,
recommendations: []timestampedRecommendation{},
}
tc.runTest(t)
}
@ -715,6 +717,7 @@ func TestLegacyScaleDownCM(t *testing.T) {
},
reportedLevels: []uint64{12, 12, 12, 12, 12},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
recommendations: []timestampedRecommendation{},
}
tc.runTest(t)
}
@ -732,6 +735,7 @@ func TestLegacyScaleDownIgnoresUnreadyPods(t *testing.T) {
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsAPI: true,
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
recommendations: []timestampedRecommendation{},
}
tc.runTest(t)
}


@ -29,9 +29,11 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
restclient "k8s.io/client-go/rest"
core "k8s.io/client-go/testing"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
heapster "k8s.io/heapster/metrics/api/v1/types"
@ -67,7 +69,8 @@ func (tc *legacyReplicaCalcTestCase) prepareTestClient(t *testing.T) *fake.Clien
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
pod := v1.Pod{
Status: v1.PodStatus{
Phase: v1.PodRunning,
Phase: v1.PodRunning,
StartTime: &metav1.Time{Time: time.Now().Add(-3 * time.Minute)},
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
@ -185,10 +188,16 @@ func (tc *legacyReplicaCalcTestCase) runTest(t *testing.T) {
testClient := tc.prepareTestClient(t)
metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
replicaCalc := &ReplicaCalculator{
metricsClient: metricsClient,
podsGetter: testClient.Core(),
tolerance: defaultTestingTolerance,
informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
informer := informerFactory.Core().V1().Pods()
replicaCalc := NewReplicaCalculator(metricsClient, informer.Lister(), defaultTestingTolerance, defaultTestingCpuInitializationPeriod, defaultTestingDelayOfInitialReadinessStatus)
stop := make(chan struct{})
defer close(stop)
informerFactory.Start(stop)
if !controller.WaitForCacheSync("HPA", stop, informer.Informer().HasSynced) {
return
}
selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
@ -213,7 +222,7 @@ func (tc *legacyReplicaCalcTestCase) runTest(t *testing.T) {
assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")
} else {
outReplicas, outUtilization, outTimestamp, err := replicaCalc.GetMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, selector)
outReplicas, outUtilization, outTimestamp, err := replicaCalc.GetMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, selector, nil)
if tc.expectedError != nil {
require.Error(t, err, "there should be an error calculating the replica count")
@ -310,10 +319,10 @@ func TestLegacyReplicaCalcScaleUpCM(t *testing.T) {
tc.runTest(t)
}
func TestLegacyReplicaCalcScaleUpCMUnreadyLessScale(t *testing.T) {
func TestLegacyReplicaCalcScaleUpCMUnreadyNoLessScale(t *testing.T) {
tc := legacyReplicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 4,
expectedReplicas: 6,
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
metric: &metricInfo{
name: "qps",
@ -325,16 +334,16 @@ func TestLegacyReplicaCalcScaleUpCMUnreadyLessScale(t *testing.T) {
tc.runTest(t)
}
func TestLegacyReplicaCalcScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
func TestLegacyReplicaCalcScaleUpCMUnreadyScale(t *testing.T) {
tc := legacyReplicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 3,
expectedReplicas: 7,
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
metric: &metricInfo{
name: "qps",
levels: []int64{50000, 15000, 30000},
targetUtilization: 15000,
expectedUtilization: 15000,
expectedUtilization: 31666,
},
}
tc.runTest(t)
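
The new expectations in TestLegacyReplicaCalcScaleUpCMUnreadyScale above follow from the usual HPA arithmetic: utilization is the integer average of the reported levels, and the desired count is ceil(currentReplicas * utilization / target). The quick standalone check below reproduces those numbers; it is a sketch of the formula only, not the calculator itself, which additionally accounts for unready and missing pods.

package main

import (
	"fmt"
	"math"
)

func main() {
	levels := []int64{50000, 15000, 30000} // reported per-pod milli-values from the test
	var sum int64
	for _, l := range levels {
		sum += l
	}
	utilization := sum / int64(len(levels)) // integer average: 95000/3 = 31666

	currentReplicas := 3.0
	target := 15000.0
	desired := int32(math.Ceil(currentReplicas * float64(utilization) / target)) // ceil(6.33) = 7

	fmt.Println(utilization, desired) // 31666 7
}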


@ -1,10 +1,4 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
@ -15,21 +9,22 @@ go_library(
"utilization.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v2beta2:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta2:go_default_library",
"//staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/custom_metrics:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/external_metrics:go_default_library",
"//vendor/k8s.io/heapster/metrics/api/v1/types:go_default_library",
"//vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1:go_default_library",
"//vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/custom_metrics:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/external_metrics:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -44,26 +39,26 @@ go_test(
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/extensions/install:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v2beta2:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta2:go_default_library",
"//staging/src/k8s.io/metrics/pkg/apis/external_metrics/v1beta1:go_default_library",
"//staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library",
"//staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/fake:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/custom_metrics/fake:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/external_metrics/fake:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/heapster/metrics/api/v1/types:go_default_library",
"//vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1:go_default_library",
"//vendor/k8s.io/metrics/pkg/apis/external_metrics/v1beta1:go_default_library",
"//vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library",
"//vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/custom_metrics/fake:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/external_metrics/fake:go_default_library",
],
)
@ -78,4 +73,5 @@ filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -19,14 +19,20 @@ package metrics
import (
"time"
autoscaling "k8s.io/api/autoscaling/v2beta1"
autoscaling "k8s.io/api/autoscaling/v2beta2"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
)
// PodMetricsInfo contains pod metric values as a map from pod names to
// metric values (the metric values are expected to be the metric as a milli-value)
type PodMetricsInfo map[string]int64
// PodMetric contains pod metric value (the metric values are expected to be the metric as a milli-value)
type PodMetric struct {
Timestamp time.Time
Window time.Duration
Value int64
}
// PodMetricsInfo contains pod metrics as a map from pod names to PodMetric
type PodMetricsInfo map[string]PodMetric
// MetricsClient knows how to query a remote interface to retrieve container-level
// resource metrics as well as pod-level arbitrary metrics
@ -37,11 +43,11 @@ type MetricsClient interface {
// GetRawMetric gets the given metric (and an associated oldest timestamp)
// for all pods matching the specified selector in the given namespace
GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error)
GetRawMetric(metricName string, namespace string, selector labels.Selector, metricSelector labels.Selector) (PodMetricsInfo, time.Time, error)
// GetObjectMetric gets the given metric (and an associated timestamp) for the given
// object in the given namespace
GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference) (int64, time.Time, error)
GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference, metricSelector labels.Selector) (int64, time.Time, error)
// GetExternalMetric gets all the values of a given external metric
// that match the specified selector.
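To illustrate the new metricSelector parameter on GetRawMetric, a caller might look like the sketch below; the metric name, namespace, and label values are hypothetical, and the MetricsClient is assumed to be constructed elsewhere.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
)

// fetchFrontendQPS shows the shape of a GetRawMetric call with the new
// metricSelector argument; names and selectors here are illustrative only.
func fetchFrontendQPS(client metricsclient.MetricsClient) error {
	podSelector := labels.SelectorFromSet(labels.Set{"app": "frontend"})
	metricSelector := labels.SelectorFromSet(labels.Set{"verb": "GET"})

	metrics, timestamp, err := client.GetRawMetric("http_requests", "default", podSelector, metricSelector)
	if err != nil {
		return err
	}
	for pod, m := range metrics {
		fmt.Printf("%s: %dm as of %v (window %v)\n", pod, m.Value, timestamp, m.Window)
	}
	return nil
}

func main() {}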

View File

@ -22,11 +22,11 @@ import (
"strings"
"time"
"github.com/golang/glog"
heapster "k8s.io/heapster/metrics/api/v1/types"
"k8s.io/klog"
metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1"
autoscaling "k8s.io/api/autoscaling/v2beta1"
autoscaling "k8s.io/api/autoscaling/v2beta2"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@ -35,10 +35,11 @@ import (
)
const (
DefaultHeapsterNamespace = "kube-system"
DefaultHeapsterScheme = "http"
DefaultHeapsterService = "heapster"
DefaultHeapsterPort = "" // use the first exposed port on the service
DefaultHeapsterNamespace = "kube-system"
DefaultHeapsterScheme = "http"
DefaultHeapsterService = "heapster"
DefaultHeapsterPort = "" // use the first exposed port on the service
heapsterDefaultMetricWindow = time.Minute
)
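These defaults are normally passed straight through when building the legacy Heapster-backed client. A minimal sketch follows; it assumes the package's NewHeapsterMetricsClient constructor takes the kube client followed by namespace, scheme, service, and port (check the constructor in this file for the exact signature).

package main

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"

	metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
)

func newLegacyClient(cfg *rest.Config) (metricsclient.MetricsClient, error) {
	kubeClient, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	// Assumed constructor shape: client, namespace, scheme, service, port.
	return metricsclient.NewHeapsterMetricsClient(
		kubeClient,
		metricsclient.DefaultHeapsterNamespace,
		metricsclient.DefaultHeapsterScheme,
		metricsclient.DefaultHeapsterService,
		metricsclient.DefaultHeapsterPort,
	), nil
}

func main() {}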
var heapsterQueryStart = -5 * time.Minute
@ -72,7 +73,7 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, name
return nil, time.Time{}, fmt.Errorf("failed to get pod resource metrics: %v", err)
}
glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))
klog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))
metrics := metricsapi.PodMetricsList{}
err = json.Unmarshal(resultRaw, &metrics)
@ -93,14 +94,18 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, name
resValue, found := c.Usage[v1.ResourceName(resource)]
if !found {
missing = true
glog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
klog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
continue
}
podSum += resValue.MilliValue()
}
if !missing {
res[m.Name] = int64(podSum)
res[m.Name] = PodMetric{
Timestamp: m.Timestamp.Time,
Window: m.Window.Duration,
Value: int64(podSum),
}
}
}
@ -109,7 +114,7 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, name
return res, timestamp, nil
}
func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector, metricSelector labels.Selector) (PodMetricsInfo, time.Time, error) {
podList, err := h.podsGetter.Pods(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return nil, time.Time{}, fmt.Errorf("failed to get pod list while fetching metrics: %v", err)
@ -145,7 +150,7 @@ func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string
return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
}
glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))
klog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))
if len(metrics.Items) != len(podNames) {
// if we get too many metrics or too few metrics, we have no way of knowing which metric goes to which pod
@ -159,7 +164,12 @@ func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string
for i, podMetrics := range metrics.Items {
val, podTimestamp, hadMetrics := collapseTimeSamples(podMetrics, time.Minute)
if hadMetrics {
res[podNames[i]] = val
res[podNames[i]] = PodMetric{
Timestamp: podTimestamp,
Window: heapsterDefaultMetricWindow,
Value: int64(val),
}
if timestamp == nil || podTimestamp.Before(*timestamp) {
timestamp = &podTimestamp
}
@ -173,7 +183,7 @@ func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string
return res, *timestamp, nil
}
func (h *HeapsterMetricsClient) GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference) (int64, time.Time, error) {
func (h *HeapsterMetricsClient) GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference, metricSelector labels.Selector) (int64, time.Time, error) {
return 0, time.Time{}, fmt.Errorf("object metrics are not yet supported")
}

View File

@ -68,13 +68,15 @@ type testCase struct {
replicas int
targetTimestamp int
window time.Duration
reportedMetricsPoints [][]metricPoint
reportedPodMetrics [][]int64
namespace string
selector labels.Selector
resourceName v1.ResourceName
metricName string
namespace string
selector labels.Selector
metricSelector labels.Selector
resourceName v1.ResourceName
metricName string
}
func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
@ -108,7 +110,8 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
Namespace: namespace,
},
Timestamp: metav1.Time{Time: fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)},
Timestamp: metav1.Time{Time: offsetTimestampBy(tc.targetTimestamp)},
Window: metav1.Duration{Duration: tc.window},
Containers: []metricsapi.ContainerMetrics{},
}
for j, cpu := range containers {
@ -137,7 +140,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
for _, reportedMetricPoints := range tc.reportedMetricsPoints {
var heapsterMetricPoints []heapster.MetricPoint
for _, reportedMetricPoint := range reportedMetricPoints {
timestamp := fixedTimestamp.Add(time.Duration(reportedMetricPoint.timestamp) * time.Minute)
timestamp := offsetTimestampBy(reportedMetricPoint.timestamp)
if latestTimestamp.Before(timestamp) {
latestTimestamp = timestamp
}
@ -196,10 +199,20 @@ func (tc *testCase) verifyResults(t *testing.T, metrics PodMetricsInfo, timestam
}
assert.NoError(t, err, "there should be no error retrieving the metrics")
assert.NotNil(t, metrics, "there should be metrics returned")
if len(metrics) != len(tc.desiredMetricValues) {
t.Errorf("Not equal:\nexpected: %v\nactual: %v", tc.desiredMetricValues, metrics)
} else {
for k, m := range metrics {
if !m.Timestamp.Equal(tc.desiredMetricValues[k].Timestamp) ||
m.Window != tc.desiredMetricValues[k].Window ||
m.Value != tc.desiredMetricValues[k].Value {
t.Errorf("Not equal:\nexpected: %v\nactual: %v", tc.desiredMetricValues, metrics)
break
}
}
}
assert.Equal(t, tc.desiredMetricValues, metrics, "the metrics values should be as expected")
targetTimestamp := fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)
targetTimestamp := offsetTimestampBy(tc.targetTimestamp)
assert.True(t, targetTimestamp.Equal(timestamp), fmt.Sprintf("the timestamp should be as expected (%s) but was %s", targetTimestamp, timestamp))
}
@ -211,71 +224,92 @@ func (tc *testCase) runTest(t *testing.T) {
info, timestamp, err := metricsClient.GetResourceMetric(tc.resourceName, tc.namespace, tc.selector)
tc.verifyResults(t, info, timestamp, err)
} else {
info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector)
info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector, tc.metricSelector)
tc.verifyResults(t, info, timestamp, err)
}
}
func TestCPU(t *testing.T) {
targetTimestamp := 1
window := 30 * time.Second
tc := testCase{
replicas: 3,
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
"test-pod-0": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
"test-pod-1": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
"test-pod-2": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
},
resourceName: v1.ResourceCPU,
targetTimestamp: 1,
targetTimestamp: targetTimestamp,
window: window,
reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}},
}
tc.runTest(t)
}
func TestQPS(t *testing.T) {
targetTimestamp := 1
tc := testCase{
replicas: 3,
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 10000, "test-pod-1": 20000, "test-pod-2": 10000,
"test-pod-0": PodMetric{Value: 10000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
"test-pod-1": PodMetric{Value: 20000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
"test-pod-2": PodMetric{Value: 10000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
},
metricName: "qps",
targetTimestamp: 1,
targetTimestamp: targetTimestamp,
reportedMetricsPoints: [][]metricPoint{{{10, 1}}, {{20, 1}}, {{10, 1}}},
}
tc.runTest(t)
}
func TestQpsSumEqualZero(t *testing.T) {
targetTimestamp := 0
tc := testCase{
replicas: 3,
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 0, "test-pod-1": 0, "test-pod-2": 0,
"test-pod-0": PodMetric{Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
"test-pod-1": PodMetric{Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
"test-pod-2": PodMetric{Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
},
metricName: "qps",
targetTimestamp: 0,
targetTimestamp: targetTimestamp,
reportedMetricsPoints: [][]metricPoint{{{0, 0}}, {{0, 0}}, {{0, 0}}},
}
tc.runTest(t)
}
func TestCPUMoreMetrics(t *testing.T) {
targetTimestamp := 10
window := 30 * time.Second
tc := testCase{
replicas: 5,
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
"test-pod-3": 5000, "test-pod-4": 5000,
"test-pod-0": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
"test-pod-1": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
"test-pod-2": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
"test-pod-3": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
"test-pod-4": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
},
resourceName: v1.ResourceCPU,
targetTimestamp: 10,
targetTimestamp: targetTimestamp,
window: window,
reportedPodMetrics: [][]int64{{1000, 2000, 2000}, {5000}, {1000, 1000, 1000, 2000}, {4000, 1000}, {5000}},
}
tc.runTest(t)
}
func TestCPUMissingMetrics(t *testing.T) {
targetTimestamp := 0
window := 30 * time.Second
tc := testCase{
replicas: 3,
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 4000,
"test-pod-0": PodMetric{Value: 4000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
},
resourceName: v1.ResourceCPU,
targetTimestamp: targetTimestamp,
window: window,
reportedPodMetrics: [][]int64{{4000}},
}
tc.runTest(t)
@ -314,13 +348,15 @@ func TestCPUEmptyMetrics(t *testing.T) {
}
func TestQpsEmptyEntries(t *testing.T) {
targetTimestamp := 4
tc := testCase{
replicas: 3,
metricName: "qps",
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 4000000, "test-pod-2": 2000000,
"test-pod-0": PodMetric{Value: 4000000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
"test-pod-2": PodMetric{Value: 2000000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
},
targetTimestamp: 4,
targetTimestamp: targetTimestamp,
reportedMetricsPoints: [][]metricPoint{{{4000, 4}}, {}, {{2000, 4}}},
}
tc.runTest(t)
@ -337,12 +373,17 @@ func TestCPUZeroReplicas(t *testing.T) {
}
func TestCPUEmptyMetricsForOnePod(t *testing.T) {
targetTimestamp := 0
window := 30 * time.Second
tc := testCase{
replicas: 3,
resourceName: v1.ResourceCPU,
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 100, "test-pod-1": 700,
"test-pod-0": PodMetric{Value: 100, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
"test-pod-1": PodMetric{Value: 700, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
},
targetTimestamp: targetTimestamp,
window: window,
reportedPodMetrics: [][]int64{{100}, {300, 400}, {}},
}
tc.runTest(t)
@ -363,3 +404,7 @@ func testCollapseTimeSamples(t *testing.T) {
assert.InEpsilon(t, float64(75), val, 0.1, "collapsed sample value should be as expected")
assert.True(t, timestamp.Equal(now), "timestamp should be the current time (the newest)")
}
func offsetTimestampBy(t int) time.Time {
return fixedTimestamp.Add(time.Duration(t) * time.Minute)
}

View File

@ -20,19 +20,23 @@ import (
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/klog"
autoscaling "k8s.io/api/autoscaling/v2beta1"
autoscaling "k8s.io/api/autoscaling/v2beta2"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
customapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta1"
resourceclient "k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1"
customapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2"
resourceclient "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
customclient "k8s.io/metrics/pkg/client/custom_metrics"
externalclient "k8s.io/metrics/pkg/client/external_metrics"
)
const (
metricServerDefaultMetricWindow = time.Minute
)
func NewRESTMetricsClient(resourceClient resourceclient.PodMetricsesGetter, customClient customclient.CustomMetricsClient, externalClient externalclient.ExternalMetricsClient) MetricsClient {
return &restMetricsClient{
&resourceMetricsClient{resourceClient},
@ -77,14 +81,18 @@ func (c *resourceMetricsClient) GetResourceMetric(resource v1.ResourceName, name
resValue, found := c.Usage[v1.ResourceName(resource)]
if !found {
missing = true
glog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
klog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
break // containers loop
}
podSum += resValue.MilliValue()
}
if !missing {
res[m.Name] = int64(podSum)
res[m.Name] = PodMetric{
Timestamp: m.Timestamp.Time,
Window: m.Window.Duration,
Value: int64(podSum),
}
}
}
@ -101,8 +109,8 @@ type customMetricsClient struct {
// GetRawMetric gets the given metric (and an associated oldest timestamp)
// for all pods matching the specified selector in the given namespace
func (c *customMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
metrics, err := c.client.NamespacedMetrics(namespace).GetForObjects(schema.GroupKind{Kind: "Pod"}, selector, metricName)
func (c *customMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector, metricSelector labels.Selector) (PodMetricsInfo, time.Time, error) {
metrics, err := c.client.NamespacedMetrics(namespace).GetForObjects(schema.GroupKind{Kind: "Pod"}, selector, metricName, metricSelector)
if err != nil {
return nil, time.Time{}, fmt.Errorf("unable to fetch metrics from custom metrics API: %v", err)
}
@ -113,7 +121,17 @@ func (c *customMetricsClient) GetRawMetric(metricName string, namespace string,
res := make(PodMetricsInfo, len(metrics.Items))
for _, m := range metrics.Items {
res[m.DescribedObject.Name] = m.Value.MilliValue()
window := metricServerDefaultMetricWindow
if m.WindowSeconds != nil {
window = time.Duration(*m.WindowSeconds) * time.Second
}
res[m.DescribedObject.Name] = PodMetric{
Timestamp: m.Timestamp.Time,
Window: window,
Value: int64(m.Value.MilliValue()),
}
}
timestamp := metrics.Items[0].Timestamp.Time
@ -123,7 +141,7 @@ func (c *customMetricsClient) GetRawMetric(metricName string, namespace string,
// GetObjectMetric gets the given metric (and an associated timestamp) for the given
// object in the given namespace
func (c *customMetricsClient) GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference) (int64, time.Time, error) {
func (c *customMetricsClient) GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference, metricSelector labels.Selector) (int64, time.Time, error) {
gvk := schema.FromAPIVersionAndKind(objectRef.APIVersion, objectRef.Kind)
var metricValue *customapi.MetricValue
var err error
@ -131,9 +149,9 @@ func (c *customMetricsClient) GetObjectMetric(metricName string, namespace strin
// handle namespace separately
// NB: we ignore namespace name here, since CrossVersionObjectReference isn't
// supposed to allow you to escape your namespace
metricValue, err = c.client.RootScopedMetrics().GetForObject(gvk.GroupKind(), namespace, metricName)
metricValue, err = c.client.RootScopedMetrics().GetForObject(gvk.GroupKind(), namespace, metricName, metricSelector)
} else {
metricValue, err = c.client.NamespacedMetrics(namespace).GetForObject(gvk.GroupKind(), objectRef.Name, metricName)
metricValue, err = c.client.NamespacedMetrics(namespace).GetForObject(gvk.GroupKind(), objectRef.Name, metricName, metricSelector)
}
if err != nil {

View File

@ -21,7 +21,7 @@ import (
"testing"
"time"
autoscalingapi "k8s.io/api/autoscaling/v2beta1"
autoscalingapi "k8s.io/api/autoscaling/v2beta2"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
"k8s.io/apimachinery/pkg/api/resource"
@ -32,10 +32,10 @@ import (
core "k8s.io/client-go/testing"
"k8s.io/kubernetes/pkg/api/legacyscheme"
_ "k8s.io/kubernetes/pkg/apis/extensions/install"
cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta1"
cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2"
emapi "k8s.io/metrics/pkg/apis/external_metrics/v1beta1"
metricsapi "k8s.io/metrics/pkg/apis/metrics/v1beta1"
metricsfake "k8s.io/metrics/pkg/client/clientset_generated/clientset/fake"
metricsfake "k8s.io/metrics/pkg/client/clientset/versioned/fake"
cmfake "k8s.io/metrics/pkg/client/custom_metrics/fake"
emfake "k8s.io/metrics/pkg/client/external_metrics/fake"
@ -48,6 +48,7 @@ type restClientTestCase struct {
// "timestamps" here are actually the offset in minutes from a base timestamp
targetTimestamp int
window time.Duration
reportedMetricPoints []metricPoint
reportedPodMetrics [][]int64
singleObject *autoscalingapi.CrossVersionObjectReference
@ -86,7 +87,8 @@ func (tc *restClientTestCase) prepareTestClient(t *testing.T) (*metricsfake.Clie
Namespace: namespace,
Labels: podLabels,
},
Timestamp: metav1.Time{Time: fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)},
Timestamp: metav1.Time{Time: offsetTimestampBy(tc.targetTimestamp)},
Window: metav1.Duration{Duration: tc.window},
Containers: []metricsapi.ContainerMetrics{},
}
for j, cpu := range containers {
@ -115,7 +117,7 @@ func (tc *restClientTestCase) prepareTestClient(t *testing.T) (*metricsfake.Clie
metrics := emapi.ExternalMetricValueList{}
for _, metricPoint := range tc.reportedMetricPoints {
timestamp := fixedTimestamp.Add(time.Duration(metricPoint.timestamp) * time.Minute)
timestamp := offsetTimestampBy(metricPoint.timestamp)
metric := emapi.ExternalMetricValue{
Value: *resource.NewMilliQuantity(int64(metricPoint.level), resource.DecimalSI),
Timestamp: metav1.Time{Time: timestamp},
@ -136,16 +138,18 @@ func (tc *restClientTestCase) prepareTestClient(t *testing.T) (*metricsfake.Clie
assert.Equal(t, "pods", getForAction.GetResource().Resource, "type of object that we requested multiple metrics for should have been pods")
for i, metricPoint := range tc.reportedMetricPoints {
timestamp := fixedTimestamp.Add(time.Duration(metricPoint.timestamp) * time.Minute)
timestamp := offsetTimestampBy(metricPoint.timestamp)
metric := cmapi.MetricValue{
DescribedObject: v1.ObjectReference{
Kind: "Pod",
APIVersion: "v1",
Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
},
Value: *resource.NewMilliQuantity(int64(metricPoint.level), resource.DecimalSI),
Timestamp: metav1.Time{Time: timestamp},
MetricName: tc.metricName,
Value: *resource.NewMilliQuantity(int64(metricPoint.level), resource.DecimalSI),
Timestamp: metav1.Time{Time: timestamp},
Metric: cmapi.MetricIdentifier{
Name: tc.metricName,
},
}
metrics.Items = append(metrics.Items, metric)
@ -166,7 +170,7 @@ func (tc *restClientTestCase) prepareTestClient(t *testing.T) (*metricsfake.Clie
assert.Equal(t, groupResource.String(), getForAction.GetResource().Resource, "should have requested metrics for the resource matching the GroupKind passed in")
assert.Equal(t, tc.singleObject.Name, name, "should have requested metrics for the object matching the name passed in")
metricPoint := tc.reportedMetricPoints[0]
timestamp := fixedTimestamp.Add(time.Duration(metricPoint.timestamp) * time.Minute)
timestamp := offsetTimestampBy(metricPoint.timestamp)
metrics := &cmapi.MetricValueList{
Items: []cmapi.MetricValue{
@ -176,9 +180,11 @@ func (tc *restClientTestCase) prepareTestClient(t *testing.T) (*metricsfake.Clie
APIVersion: tc.singleObject.APIVersion,
Name: tc.singleObject.Name,
},
Timestamp: metav1.Time{Time: timestamp},
MetricName: tc.metricName,
Value: *resource.NewMilliQuantity(int64(metricPoint.level), resource.DecimalSI),
Timestamp: metav1.Time{Time: timestamp},
Metric: cmapi.MetricIdentifier{
Name: tc.metricName,
},
Value: *resource.NewMilliQuantity(int64(metricPoint.level), resource.DecimalSI),
},
},
}
@ -200,9 +206,20 @@ func (tc *restClientTestCase) verifyResults(t *testing.T, metrics PodMetricsInfo
assert.NoError(t, err, "there should be no error retrieving the metrics")
assert.NotNil(t, metrics, "there should be metrics returned")
assert.Equal(t, tc.desiredMetricValues, metrics, "the metrics values should be as expected")
if len(metrics) != len(tc.desiredMetricValues) {
t.Errorf("Not equal:\nexpected: %v\nactual: %v", tc.desiredMetricValues, metrics)
} else {
for k, m := range metrics {
if !m.Timestamp.Equal(tc.desiredMetricValues[k].Timestamp) ||
m.Window != tc.desiredMetricValues[k].Window ||
m.Value != tc.desiredMetricValues[k].Value {
t.Errorf("Not equal:\nexpected: %v\nactual: %v", tc.desiredMetricValues, metrics)
break
}
}
}
targetTimestamp := fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)
targetTimestamp := offsetTimestampBy(tc.targetTimestamp)
assert.True(t, targetTimestamp.Equal(timestamp), fmt.Sprintf("the timestamp should be as expected (%s) but was %s", targetTimestamp, timestamp))
}
@ -223,26 +240,31 @@ func (tc *restClientTestCase) runTest(t *testing.T) {
val, timestamp, err := metricsClient.GetExternalMetric(tc.metricName, tc.namespace, tc.metricLabelSelector)
info := make(PodMetricsInfo, len(val))
for i, metricVal := range val {
info[fmt.Sprintf("%v-val-%v", tc.metricName, i)] = metricVal
info[fmt.Sprintf("%v-val-%v", tc.metricName, i)] = PodMetric{Value: metricVal}
}
tc.verifyResults(t, info, timestamp, err)
} else if tc.singleObject == nil {
info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector)
info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector, tc.metricLabelSelector)
tc.verifyResults(t, info, timestamp, err)
} else {
val, timestamp, err := metricsClient.GetObjectMetric(tc.metricName, tc.namespace, tc.singleObject)
info := PodMetricsInfo{tc.singleObject.Name: val}
val, timestamp, err := metricsClient.GetObjectMetric(tc.metricName, tc.namespace, tc.singleObject, tc.metricLabelSelector)
info := PodMetricsInfo{tc.singleObject.Name: {Value: val}}
tc.verifyResults(t, info, timestamp, err)
}
}
func TestRESTClientCPU(t *testing.T) {
targetTimestamp := 1
window := 30 * time.Second
tc := restClientTestCase{
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
"test-pod-0": {Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
"test-pod-1": {Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
"test-pod-2": {Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
},
resourceName: v1.ResourceCPU,
targetTimestamp: 1,
targetTimestamp: targetTimestamp,
window: window,
reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}},
}
tc.runTest(t)
@ -251,7 +273,7 @@ func TestRESTClientCPU(t *testing.T) {
func TestRESTClientExternal(t *testing.T) {
tc := restClientTestCase{
desiredMetricValues: PodMetricsInfo{
"external-val-0": 10000, "external-val-1": 20000, "external-val-2": 10000,
"external-val-0": {Value: 10000}, "external-val-1": {Value: 20000}, "external-val-2": {Value: 10000},
},
metricSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
metricName: "external",
@ -262,12 +284,15 @@ func TestRESTClientExternal(t *testing.T) {
}
func TestRESTClientQPS(t *testing.T) {
targetTimestamp := 1
tc := restClientTestCase{
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 10000, "test-pod-1": 20000, "test-pod-2": 10000,
"test-pod-0": {Value: 10000, Timestamp: offsetTimestampBy(targetTimestamp), Window: metricServerDefaultMetricWindow},
"test-pod-1": {Value: 20000, Timestamp: offsetTimestampBy(targetTimestamp), Window: metricServerDefaultMetricWindow},
"test-pod-2": {Value: 10000, Timestamp: offsetTimestampBy(targetTimestamp), Window: metricServerDefaultMetricWindow},
},
metricName: "qps",
targetTimestamp: 1,
targetTimestamp: targetTimestamp,
reportedMetricPoints: []metricPoint{{10000, 1}, {20000, 1}, {10000, 1}},
}
tc.runTest(t)
@ -275,7 +300,7 @@ func TestRESTClientQPS(t *testing.T) {
func TestRESTClientSingleObject(t *testing.T) {
tc := restClientTestCase{
desiredMetricValues: PodMetricsInfo{"some-dep": 10},
desiredMetricValues: PodMetricsInfo{"some-dep": {Value: 10}},
metricName: "queue-length",
targetTimestamp: 1,
reportedMetricPoints: []metricPoint{{10, 1}},
@ -289,12 +314,15 @@ func TestRESTClientSingleObject(t *testing.T) {
}
func TestRESTClientQpsSumEqualZero(t *testing.T) {
targetTimestamp := 0
tc := restClientTestCase{
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 0, "test-pod-1": 0, "test-pod-2": 0,
"test-pod-0": {Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: metricServerDefaultMetricWindow},
"test-pod-1": {Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: metricServerDefaultMetricWindow},
"test-pod-2": {Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: metricServerDefaultMetricWindow},
},
metricName: "qps",
targetTimestamp: 0,
targetTimestamp: targetTimestamp,
reportedMetricPoints: []metricPoint{{0, 0}, {0, 0}, {0, 0}},
}
tc.runTest(t)
@ -303,7 +331,7 @@ func TestRESTClientQpsSumEqualZero(t *testing.T) {
func TestRESTClientExternalSumEqualZero(t *testing.T) {
tc := restClientTestCase{
desiredMetricValues: PodMetricsInfo{
"external-val-0": 0, "external-val-1": 0, "external-val-2": 0,
"external-val-0": {Value: 0}, "external-val-1": {Value: 0}, "external-val-2": {Value: 0},
},
metricSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
metricName: "external",
@ -345,11 +373,16 @@ func TestRESTClientCPUEmptyMetrics(t *testing.T) {
}
func TestRESTClientCPUEmptyMetricsForOnePod(t *testing.T) {
targetTimestamp := 1
window := 30 * time.Second
tc := restClientTestCase{
resourceName: v1.ResourceCPU,
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 100, "test-pod-1": 700,
"test-pod-0": {Value: 100, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
"test-pod-1": {Value: 700, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
},
targetTimestamp: targetTimestamp,
window: window,
reportedPodMetrics: [][]int64{{100}, {300, 400}, {}},
}
tc.runTest(t)

View File

@ -28,14 +28,14 @@ func GetResourceUtilizationRatio(metrics PodMetricsInfo, requests map[string]int
requestsTotal := int64(0)
numEntries := 0
for podName, metricValue := range metrics {
for podName, metric := range metrics {
request, hasRequest := requests[podName]
if !hasRequest {
// we check for missing requests elsewhere, so assuming missing requests == extraneous metrics
continue
}
metricsTotal += metricValue
metricsTotal += metric.Value
requestsTotal += request
numEntries++
}
@ -56,8 +56,8 @@ func GetResourceUtilizationRatio(metrics PodMetricsInfo, requests map[string]int
// (returning that and the actual utilization)
func GetMetricUtilizationRatio(metrics PodMetricsInfo, targetUtilization int64) (utilizationRatio float64, currentUtilization int64) {
metricsTotal := int64(0)
for _, metricValue := range metrics {
metricsTotal += metricValue
for _, metric := range metrics {
metricsTotal += metric.Value
}
currentUtilization = metricsTotal / int64(len(metrics))
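A short worked example may help here; the pod names below are hypothetical and the numbers mirror TestGetMetricUtilizationRatioBaseCase further down. GetResourceUtilizationRatio above follows the same pattern, but expresses usage as a percentage of the summed requests.

package main

import (
	"fmt"

	metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
)

func main() {
	// Two hypothetical pods reporting 5000 and 10000 (milli-units) for a custom metric.
	metrics := metricsclient.PodMetricsInfo{
		"test-pod-0": {Value: 5000},
		"test-pod-1": {Value: 10000},
	}

	// currentUtilization = (5000 + 10000) / 2 = 7500
	// utilizationRatio   = 7500 / 10000      = 0.75
	ratio, current := metricsclient.GetMetricUtilizationRatio(metrics, 10000)
	fmt.Println(ratio, current) // 0.75 7500
}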

View File

@ -67,7 +67,7 @@ func (tc *metricUtilizationRatioTestCase) runTest(t *testing.T) {
func TestGetResourceUtilizationRatioBaseCase(t *testing.T) {
tc := resourceUtilizationRatioTestCase{
metrics: PodMetricsInfo{
"test-pod-0": 50, "test-pod-1": 76,
"test-pod-0": {Value: 50}, "test-pod-1": {Value: 76},
},
requests: map[string]int64{
"test-pod-0": 100, "test-pod-1": 100,
@ -85,7 +85,7 @@ func TestGetResourceUtilizationRatioBaseCase(t *testing.T) {
func TestGetResourceUtilizationRatioIgnorePodsWithNoRequest(t *testing.T) {
tc := resourceUtilizationRatioTestCase{
metrics: PodMetricsInfo{
"test-pod-0": 50, "test-pod-1": 76, "test-pod-no-request": 100,
"test-pod-0": {Value: 50}, "test-pod-1": {Value: 76}, "test-pod-no-request": {Value: 100},
},
requests: map[string]int64{
"test-pod-0": 100, "test-pod-1": 100,
@ -103,7 +103,7 @@ func TestGetResourceUtilizationRatioIgnorePodsWithNoRequest(t *testing.T) {
func TestGetResourceUtilizationRatioExtraRequest(t *testing.T) {
tc := resourceUtilizationRatioTestCase{
metrics: PodMetricsInfo{
"test-pod-0": 50, "test-pod-1": 76,
"test-pod-0": {Value: 50}, "test-pod-1": {Value: 76},
},
requests: map[string]int64{
"test-pod-0": 100, "test-pod-1": 100, "test-pod-extra-request": 500,
@ -121,7 +121,7 @@ func TestGetResourceUtilizationRatioExtraRequest(t *testing.T) {
func TestGetResourceUtilizationRatioNoRequests(t *testing.T) {
tc := resourceUtilizationRatioTestCase{
metrics: PodMetricsInfo{
"test-pod-0": 50, "test-pod-1": 76,
"test-pod-0": {Value: 50}, "test-pod-1": {Value: 76},
},
requests: map[string]int64{},
targetUtilization: 50,
@ -138,7 +138,7 @@ func TestGetResourceUtilizationRatioNoRequests(t *testing.T) {
func TestGetMetricUtilizationRatioBaseCase(t *testing.T) {
tc := metricUtilizationRatioTestCase{
metrics: PodMetricsInfo{
"test-pod-0": 5000, "test-pod-1": 10000,
"test-pod-0": {Value: 5000}, "test-pod-1": {Value: 10000},
},
targetUtilization: 10000,
expectedUtilizationRatio: .75,

View File

@ -21,12 +21,12 @@ import (
"math"
"time"
autoscaling "k8s.io/api/autoscaling/v2beta1"
autoscaling "k8s.io/api/autoscaling/v2beta2"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
v1coreclient "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
)
@ -34,20 +34,26 @@ import (
const (
// defaultTestingTolerance is default value for calculating when to
// scale up/scale down.
defaultTestingTolerance = 0.1
defaultTestingTolerance = 0.1
defaultTestingCpuInitializationPeriod = 2 * time.Minute
defaultTestingDelayOfInitialReadinessStatus = 10 * time.Second
)
type ReplicaCalculator struct {
metricsClient metricsclient.MetricsClient
podsGetter v1coreclient.PodsGetter
tolerance float64
metricsClient metricsclient.MetricsClient
podLister corelisters.PodLister
tolerance float64
cpuInitializationPeriod time.Duration
delayOfInitialReadinessStatus time.Duration
}
func NewReplicaCalculator(metricsClient metricsclient.MetricsClient, podsGetter v1coreclient.PodsGetter, tolerance float64) *ReplicaCalculator {
func NewReplicaCalculator(metricsClient metricsclient.MetricsClient, podLister corelisters.PodLister, tolerance float64, cpuInitializationPeriod, delayOfInitialReadinessStatus time.Duration) *ReplicaCalculator {
return &ReplicaCalculator{
metricsClient: metricsClient,
podsGetter: podsGetter,
tolerance: tolerance,
metricsClient: metricsClient,
podLister: podLister,
tolerance: tolerance,
cpuInitializationPeriod: cpuInitializationPeriod,
delayOfInitialReadinessStatus: delayOfInitialReadinessStatus,
}
}
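A hedged sketch of how a caller might wire the new lister-based constructor; the rest config, resync period, tolerance, and the two grace periods below are illustrative values, and the metrics client is assumed to be built separately.

package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/controller/podautoscaler"
	metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
)

func newCalculator(cfg *rest.Config, mc metricsclient.MetricsClient) (*podautoscaler.ReplicaCalculator, informers.SharedInformerFactory, error) {
	kubeClient, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, nil, err
	}

	// Pods are now read from a shared informer's lister rather than listed
	// directly from the API server on every calculation.
	factory := informers.NewSharedInformerFactory(kubeClient, 30*time.Second)
	podLister := factory.Core().V1().Pods().Lister()

	calc := podautoscaler.NewReplicaCalculator(mc, podLister, 0.1, 5*time.Minute, 30*time.Second)

	// The caller still has to Start the factory and wait for the pod cache to
	// sync before the lister returns complete results.
	return calc, factory, nil
}

func main() {}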
@ -58,52 +64,21 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti
if err != nil {
return 0, 0, 0, time.Time{}, fmt.Errorf("unable to get metrics for resource %s: %v", resource, err)
}
podList, err := c.podsGetter.Pods(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
podList, err := c.podLister.Pods(namespace).List(selector)
if err != nil {
return 0, 0, 0, time.Time{}, fmt.Errorf("unable to get pods while calculating replica count: %v", err)
}
itemsLen := len(podList.Items)
itemsLen := len(podList)
if itemsLen == 0 {
return 0, 0, 0, time.Time{}, fmt.Errorf("no pods returned by selector while calculating replica count")
}
requests := make(map[string]int64, itemsLen)
readyPodCount := 0
unreadyPods := sets.NewString()
missingPods := sets.NewString()
for _, pod := range podList.Items {
podSum := int64(0)
for _, container := range pod.Spec.Containers {
if containerRequest, ok := container.Resources.Requests[resource]; ok {
podSum += containerRequest.MilliValue()
} else {
return 0, 0, 0, time.Time{}, fmt.Errorf("missing request for %s on container %s in pod %s/%s", resource, container.Name, namespace, pod.Name)
}
}
requests[pod.Name] = podSum
if pod.Status.Phase != v1.PodRunning || !podutil.IsPodReady(&pod) {
// save this pod name for later, but pretend it doesn't exist for now
if pod.Status.Phase != v1.PodFailed {
// Failed pods should not be counted as unready pods as they will
// not become running anymore.
unreadyPods.Insert(pod.Name)
}
delete(metrics, pod.Name)
continue
}
if _, found := metrics[pod.Name]; !found {
// save this pod name for later, but pretend it doesn't exist for now
missingPods.Insert(pod.Name)
continue
}
readyPodCount++
readyPodCount, ignoredPods, missingPods := groupPods(podList, metrics, resource, c.cpuInitializationPeriod, c.delayOfInitialReadinessStatus)
removeMetricsForPods(metrics, ignoredPods)
requests, err := calculatePodRequests(podList, resource)
if err != nil {
return 0, 0, 0, time.Time{}, err
}
if len(metrics) == 0 {
@ -115,8 +90,8 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti
return 0, 0, 0, time.Time{}, err
}
rebalanceUnready := len(unreadyPods) > 0 && usageRatio > 1.0
if !rebalanceUnready && len(missingPods) == 0 {
rebalanceIgnored := len(ignoredPods) > 0 && usageRatio > 1.0
if !rebalanceIgnored && len(missingPods) == 0 {
if math.Abs(1.0-usageRatio) <= c.tolerance {
// return the current replicas if the change would be too small
return currentReplicas, utilization, rawUtilization, timestamp, nil
@ -130,20 +105,20 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti
if usageRatio < 1.0 {
// on a scale-down, treat missing pods as using 100% of the resource request
for podName := range missingPods {
metrics[podName] = requests[podName]
metrics[podName] = metricsclient.PodMetric{Value: requests[podName]}
}
} else if usageRatio > 1.0 {
// on a scale-up, treat missing pods as using 0% of the resource request
for podName := range missingPods {
metrics[podName] = 0
metrics[podName] = metricsclient.PodMetric{Value: 0}
}
}
}
if rebalanceUnready {
if rebalanceIgnored {
// on a scale-up, treat ignored (unready or hot-CPU) pods as using 0% of the resource request
for podName := range unreadyPods {
metrics[podName] = 0
for podName := range ignoredPods {
metrics[podName] = metricsclient.PodMetric{Value: 0}
}
}
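As a worked example of this rebalancing (mirroring TestReplicaCalcScaleUpHotCpuLessScale below): with three pods each requesting 1000m, a 30% target, and only two ready pods using 500m and 700m, the ready-pod ratio is 60% / 30% = 2.0, which alone would suggest scaling from 3 to 6 replicas. Because the ratio is above 1.0 and one pod was ignored, that pod is added back with a value of 0 and the ratio is recomputed over all three pods, 1200m used against 900m of target usage ≈ 1.33, so the calculator ends up proposing 4 replicas instead of 6.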
@ -172,54 +147,37 @@ func (c *ReplicaCalculator) GetRawResourceReplicas(currentReplicas int32, target
return 0, 0, time.Time{}, fmt.Errorf("unable to get metrics for resource %s: %v", resource, err)
}
replicaCount, utilization, err = c.calcPlainMetricReplicas(metrics, currentReplicas, targetUtilization, namespace, selector)
replicaCount, utilization, err = c.calcPlainMetricReplicas(metrics, currentReplicas, targetUtilization, namespace, selector, resource)
return replicaCount, utilization, timestamp, err
}
// GetMetricReplicas calculates the desired replica count based on a target metric utilization
// (as a milli-value) for pods matching the given selector in the given namespace, and the
// current replica count
func (c *ReplicaCalculator) GetMetricReplicas(currentReplicas int32, targetUtilization int64, metricName string, namespace string, selector labels.Selector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
metrics, timestamp, err := c.metricsClient.GetRawMetric(metricName, namespace, selector)
func (c *ReplicaCalculator) GetMetricReplicas(currentReplicas int32, targetUtilization int64, metricName string, namespace string, selector labels.Selector, metricSelector labels.Selector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
metrics, timestamp, err := c.metricsClient.GetRawMetric(metricName, namespace, selector, metricSelector)
if err != nil {
return 0, 0, time.Time{}, fmt.Errorf("unable to get metric %s: %v", metricName, err)
}
replicaCount, utilization, err = c.calcPlainMetricReplicas(metrics, currentReplicas, targetUtilization, namespace, selector)
replicaCount, utilization, err = c.calcPlainMetricReplicas(metrics, currentReplicas, targetUtilization, namespace, selector, v1.ResourceName(""))
return replicaCount, utilization, timestamp, err
}
// calcPlainMetricReplicas calculates the desired replicas for plain (i.e. non-utilization percentage) metrics.
func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMetricsInfo, currentReplicas int32, targetUtilization int64, namespace string, selector labels.Selector) (replicaCount int32, utilization int64, err error) {
podList, err := c.podsGetter.Pods(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMetricsInfo, currentReplicas int32, targetUtilization int64, namespace string, selector labels.Selector, resource v1.ResourceName) (replicaCount int32, utilization int64, err error) {
podList, err := c.podLister.Pods(namespace).List(selector)
if err != nil {
return 0, 0, fmt.Errorf("unable to get pods while calculating replica count: %v", err)
}
if len(podList.Items) == 0 {
if len(podList) == 0 {
return 0, 0, fmt.Errorf("no pods returned by selector while calculating replica count")
}
readyPodCount := 0
unreadyPods := sets.NewString()
missingPods := sets.NewString()
for _, pod := range podList.Items {
if pod.Status.Phase != v1.PodRunning || !podutil.IsPodReady(&pod) {
// save this pod name for later, but pretend it doesn't exist for now
unreadyPods.Insert(pod.Name)
delete(metrics, pod.Name)
continue
}
if _, found := metrics[pod.Name]; !found {
// save this pod name for later, but pretend it doesn't exist for now
missingPods.Insert(pod.Name)
continue
}
readyPodCount++
}
readyPodCount, ignoredPods, missingPods := groupPods(podList, metrics, resource, c.cpuInitializationPeriod, c.delayOfInitialReadinessStatus)
removeMetricsForPods(metrics, ignoredPods)
if len(metrics) == 0 {
return 0, 0, fmt.Errorf("did not receive metrics for any ready pods")
@ -227,9 +185,9 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet
usageRatio, utilization := metricsclient.GetMetricUtilizationRatio(metrics, targetUtilization)
rebalanceUnready := len(unreadyPods) > 0 && usageRatio > 1.0
rebalanceIgnored := len(ignoredPods) > 0 && usageRatio > 1.0
if !rebalanceUnready && len(missingPods) == 0 {
if !rebalanceIgnored && len(missingPods) == 0 {
if math.Abs(1.0-usageRatio) <= c.tolerance {
// return the current replicas if the change would be too small
return currentReplicas, utilization, nil
@ -243,20 +201,20 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet
if usageRatio < 1.0 {
// on a scale-down, treat missing pods as using 100% of the resource request
for podName := range missingPods {
metrics[podName] = targetUtilization
metrics[podName] = metricsclient.PodMetric{Value: targetUtilization}
}
} else {
// on a scale-up, treat missing pods as using 0% of the resource request
for podName := range missingPods {
metrics[podName] = 0
metrics[podName] = metricsclient.PodMetric{Value: 0}
}
}
}
if rebalanceUnready {
if rebalanceIgnored {
// on a scale-up, treat ignored (unready or hot-CPU) pods as using 0% of the resource request
for podName := range unreadyPods {
metrics[podName] = 0
for podName := range ignoredPods {
metrics[podName] = metricsclient.PodMetric{Value: 0}
}
}
@ -276,8 +234,8 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet
// GetObjectMetricReplicas calculates the desired replica count based on a target metric utilization (as a milli-value)
// for the given object in the given namespace, and the current replica count.
func (c *ReplicaCalculator) GetObjectMetricReplicas(currentReplicas int32, targetUtilization int64, metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference, selector labels.Selector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
utilization, timestamp, err = c.metricsClient.GetObjectMetric(metricName, namespace, objectRef)
func (c *ReplicaCalculator) GetObjectMetricReplicas(currentReplicas int32, targetUtilization int64, metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference, selector labels.Selector, metricSelector labels.Selector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
utilization, timestamp, err = c.metricsClient.GetObjectMetric(metricName, namespace, objectRef, metricSelector)
if err != nil {
return 0, 0, time.Time{}, fmt.Errorf("unable to get metric %s: %v on %s %s/%s", metricName, objectRef.Kind, namespace, objectRef.Name, err)
}
@ -303,19 +261,19 @@ func (c *ReplicaCalculator) GetObjectMetricReplicas(currentReplicas int32, targe
// of this function. Make this function generic, so we don't repeat the same
// logic in multiple places.
func (c *ReplicaCalculator) getReadyPodsCount(namespace string, selector labels.Selector) (int64, error) {
podList, err := c.podsGetter.Pods(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
podList, err := c.podLister.Pods(namespace).List(selector)
if err != nil {
return 0, fmt.Errorf("unable to get pods while calculating replica count: %v", err)
}
if len(podList.Items) == 0 {
if len(podList) == 0 {
return 0, fmt.Errorf("no pods returned by selector while calculating replica count")
}
readyPodCount := 0
for _, pod := range podList.Items {
if pod.Status.Phase == v1.PodRunning && podutil.IsPodReady(&pod) {
for _, pod := range podList {
if pod.Status.Phase == v1.PodRunning && podutil.IsPodReady(pod) {
readyPodCount++
}
}
@ -381,3 +339,62 @@ func (c *ReplicaCalculator) GetExternalPerPodMetricReplicas(currentReplicas int3
utilization = int64(math.Ceil(float64(utilization) / float64(currentReplicas)))
return replicaCount, utilization, timestamp, nil
}
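As a quick worked example of the per-pod averaging above: if the external metric series sum to 3,000 and there are 4 current replicas, the utilization reported back is ceil(3000 / 4) = 750 per pod.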
func groupPods(pods []*v1.Pod, metrics metricsclient.PodMetricsInfo, resource v1.ResourceName, cpuInitializationPeriod, delayOfInitialReadinessStatus time.Duration) (readyPodCount int, ignoredPods sets.String, missingPods sets.String) {
missingPods = sets.NewString()
ignoredPods = sets.NewString()
for _, pod := range pods {
if pod.DeletionTimestamp != nil || pod.Status.Phase == v1.PodFailed {
continue
}
metric, found := metrics[pod.Name]
if !found {
missingPods.Insert(pod.Name)
continue
}
if resource == v1.ResourceCPU {
var ignorePod bool
_, condition := podutil.GetPodCondition(&pod.Status, v1.PodReady)
if condition == nil || pod.Status.StartTime == nil {
ignorePod = true
} else {
// Pod still within possible initialization period.
if pod.Status.StartTime.Add(cpuInitializationPeriod).After(time.Now()) {
// Ignore sample if pod is unready or a full metric window hasn't been collected since the last state transition.
ignorePod = condition.Status == v1.ConditionFalse || metric.Timestamp.Before(condition.LastTransitionTime.Time.Add(metric.Window))
} else {
// Ignore metric if pod is unready and it has never been ready.
ignorePod = condition.Status == v1.ConditionFalse && pod.Status.StartTime.Add(delayOfInitialReadinessStatus).After(condition.LastTransitionTime.Time)
}
}
if ignorePod {
ignoredPods.Insert(pod.Name)
continue
}
}
readyPodCount++
}
return
}
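To make the CPU branch above easier to follow, here is a standalone restatement of the ignore decision for a single pod. It mirrors the logic in groupPods over plain values so it can be read and run in isolation; the nil checks on the Ready condition and StartTime (which also cause the pod to be ignored) are omitted, and the numbers in main are hypothetical.

package main

import (
	"fmt"
	"time"
)

// ignoreCPUSample reports whether a CPU sample for one pod would be dropped.
// ready is the pod's Ready condition status, lastTransition is when that
// condition last changed, start is the pod's start time, and sampleTime/window
// describe the metric sample.
func ignoreCPUSample(ready bool, start, lastTransition, sampleTime time.Time, window, cpuInitializationPeriod, initialReadinessDelay time.Duration) bool {
	if start.Add(cpuInitializationPeriod).After(time.Now()) {
		// Within the CPU initialization period: drop the sample if the pod is
		// unready or the sample window does not yet cover the last transition.
		return !ready || sampleTime.Before(lastTransition.Add(window))
	}
	// After the initialization period: drop the sample only if the pod is
	// unready and has never been ready (readiness flipped within the delay).
	return !ready && start.Add(initialReadinessDelay).After(lastTransition)
}

func main() {
	start := time.Now().Add(-1 * time.Minute) // hypothetical pod started a minute ago
	// Unready pod still inside a 2-minute initialization period: sample is ignored.
	fmt.Println(ignoreCPUSample(false, start, start, time.Now(), 30*time.Second, 2*time.Minute, 10*time.Second))
}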
func calculatePodRequests(pods []*v1.Pod, resource v1.ResourceName) (map[string]int64, error) {
requests := make(map[string]int64, len(pods))
for _, pod := range pods {
podSum := int64(0)
for _, container := range pod.Spec.Containers {
if containerRequest, ok := container.Resources.Requests[resource]; ok {
podSum += containerRequest.MilliValue()
} else {
return nil, fmt.Errorf("missing request for %s", resource)
}
}
requests[pod.Name] = podSum
}
return requests, nil
}
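For reference, the per-pod sum here is in milli-units: a pod whose containers request 500m and 0.3 CPU is recorded as 800. A minimal sketch with a hypothetical pod:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// A hypothetical pod with two containers requesting 500m and 0.3 CPU.
	pod := v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "app", Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m")},
				}},
				{Name: "sidecar", Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("0.3")},
				}},
			},
		},
	}

	// Same summation calculatePodRequests performs per pod: 500 + 300 = 800 milli-CPU.
	sum := int64(0)
	for _, c := range pod.Spec.Containers {
		if req, ok := c.Resources.Requests[v1.ResourceCPU]; ok {
			sum += req.MilliValue()
		}
	}
	fmt.Println(sum) // 800
}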
func removeMetricsForPods(metrics metricsclient.PodMetricsInfo, pods sets.String) {
for _, pod := range pods.UnsortedList() {
delete(metrics, pod)
}
}

View File

@ -22,21 +22,25 @@ import (
"testing"
"time"
autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta1"
"k8s.io/kubernetes/pkg/controller"
metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2"
emapi "k8s.io/metrics/pkg/apis/external_metrics/v1beta1"
metricsapi "k8s.io/metrics/pkg/apis/metrics/v1beta1"
metricsfake "k8s.io/metrics/pkg/client/clientset_generated/clientset/fake"
metricsfake "k8s.io/metrics/pkg/client/clientset/versioned/fake"
cmfake "k8s.io/metrics/pkg/client/custom_metrics/fake"
emfake "k8s.io/metrics/pkg/client/external_metrics/fake"
@ -56,11 +60,21 @@ type resourceInfo struct {
expectedValue int64
}
type metricType int
const (
objectMetric metricType = iota
externalMetric
externalPerPodMetric
podMetric
)
type metricInfo struct {
name string
levels []int64
singleObject *autoscalingv2.CrossVersionObjectReference
selector *metav1.LabelSelector
metricType metricType
targetUtilization int64
perPodTargetUtilization int64
@ -74,11 +88,14 @@ type replicaCalcTestCase struct {
timestamp time.Time
resource *resourceInfo
metric *metricInfo
resource *resourceInfo
metric *metricInfo
metricLabelSelector labels.Selector
podReadiness []v1.ConditionStatus
podPhase []v1.PodPhase
podReadiness []v1.ConditionStatus
podStartTime []metav1.Time
podPhase []v1.PodPhase
podDeletionTimestamp []bool
}
const (
@ -87,8 +104,7 @@ const (
numContainersPerPod = 2
)
func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient, *emfake.FakeExternalMetricsClient) {
func (tc *replicaCalcTestCase) prepareTestClientSet() *fake.Clientset {
fakeClient := &fake.Clientset{}
fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &v1.PodList{}
@ -102,14 +118,23 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,
if tc.podReadiness != nil && i < len(tc.podReadiness) {
podReadiness = tc.podReadiness[i]
}
var podStartTime metav1.Time
if tc.podStartTime != nil {
podStartTime = tc.podStartTime[i]
}
podPhase := v1.PodRunning
if tc.podPhase != nil {
podPhase = tc.podPhase[i]
}
podDeletionTimestamp := false
if tc.podDeletionTimestamp != nil {
podDeletionTimestamp = tc.podDeletionTimestamp[i]
}
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
pod := v1.Pod{
Status: v1.PodStatus{
Phase: podPhase,
Phase: podPhase,
StartTime: &podStartTime,
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
@ -128,6 +153,9 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,
Containers: []v1.Container{{}, {}},
},
}
if podDeletionTimestamp {
pod.DeletionTimestamp = &metav1.Time{Time: time.Now()}
}
if tc.resource != nil && i < len(tc.resource.requests) {
pod.Spec.Containers[0].Resources = v1.ResourceRequirements{
@ -145,7 +173,10 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,
}
return true, obj, nil
})
return fakeClient
}
func (tc *replicaCalcTestCase) prepareTestMetricsClient() *metricsfake.Clientset {
fakeMetricsClient := &metricsfake.Clientset{}
// NB: we have to sound like Gollum due to gengo's inability to handle already-plural resource names
fakeMetricsClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
@ -165,6 +196,7 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,
Labels: map[string]string{"name": podNamePrefix},
},
Timestamp: metav1.Time{Time: tc.timestamp},
Window: metav1.Duration{Duration: time.Minute},
Containers: make([]metricsapi.ContainerMetrics, numContainersPerPod),
}
@ -185,7 +217,10 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,
return true, nil, fmt.Errorf("no pod resource metrics specified in test client")
})
return fakeMetricsClient
}
func (tc *replicaCalcTestCase) prepareTestCMClient(t *testing.T) *cmfake.FakeCustomMetricsClient {
fakeCMClient := &cmfake.FakeCustomMetricsClient{}
fakeCMClient.AddReactor("get", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
getForAction, wasGetFor := action.(cmfake.GetForAction)
@ -212,9 +247,11 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,
Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
Namespace: testNamespace,
},
Timestamp: metav1.Time{Time: tc.timestamp},
MetricName: tc.metric.name,
Value: *resource.NewMilliQuantity(level, resource.DecimalSI),
Timestamp: metav1.Time{Time: tc.timestamp},
Metric: cmapi.MetricIdentifier{
Name: tc.metric.name,
},
Value: *resource.NewMilliQuantity(level, resource.DecimalSI),
}
metrics.Items = append(metrics.Items, podMetric)
}
@ -242,15 +279,20 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,
APIVersion: tc.metric.singleObject.APIVersion,
Name: name,
},
Timestamp: metav1.Time{Time: tc.timestamp},
MetricName: tc.metric.name,
Value: *resource.NewMilliQuantity(int64(tc.metric.levels[0]), resource.DecimalSI),
Timestamp: metav1.Time{Time: tc.timestamp},
Metric: cmapi.MetricIdentifier{
Name: tc.metric.name,
},
Value: *resource.NewMilliQuantity(int64(tc.metric.levels[0]), resource.DecimalSI),
},
}
return true, metrics, nil
})
return fakeCMClient
}
func (tc *replicaCalcTestCase) prepareTestEMClient(t *testing.T) *emfake.FakeExternalMetricsClient {
fakeEMClient := &emfake.FakeExternalMetricsClient{}
fakeEMClient.AddReactor("list", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
listAction, wasList := action.(core.ListAction)
@ -283,18 +325,31 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,
return true, &metrics, nil
})
return fakeEMClient
}
func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient, *emfake.FakeExternalMetricsClient) {
fakeClient := tc.prepareTestClientSet()
fakeMetricsClient := tc.prepareTestMetricsClient()
fakeCMClient := tc.prepareTestCMClient(t)
fakeEMClient := tc.prepareTestEMClient(t)
return fakeClient, fakeMetricsClient, fakeCMClient, fakeEMClient
}
func (tc *replicaCalcTestCase) runTest(t *testing.T) {
testClient, testMetricsClient, testCMClient, testEMClient := tc.prepareTestClient(t)
metricsClient := metrics.NewRESTMetricsClient(testMetricsClient.MetricsV1beta1(), testCMClient, testEMClient)
metricsClient := metricsclient.NewRESTMetricsClient(testMetricsClient.MetricsV1beta1(), testCMClient, testEMClient)
replicaCalc := &ReplicaCalculator{
metricsClient: metricsClient,
podsGetter: testClient.Core(),
tolerance: defaultTestingTolerance,
informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
informer := informerFactory.Core().V1().Pods()
replicaCalc := NewReplicaCalculator(metricsClient, informer.Lister(), defaultTestingTolerance, defaultTestingCpuInitializationPeriod, defaultTestingDelayOfInitialReadinessStatus)
stop := make(chan struct{})
defer close(stop)
informerFactory.Start(stop)
if !controller.WaitForCacheSync("HPA", stop, informer.Informer().HasSynced) {
return
}
selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
@ -317,34 +372,50 @@ func (tc *replicaCalcTestCase) runTest(t *testing.T) {
assert.Equal(t, tc.resource.expectedUtilization, outUtilization, "utilization should be as expected")
assert.Equal(t, tc.resource.expectedValue, outRawValue, "raw value should be as expected")
assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")
} else {
var outReplicas int32
var outUtilization int64
var outTimestamp time.Time
var err error
if tc.metric.singleObject != nil {
outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetObjectMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, tc.metric.singleObject, selector)
} else if tc.metric.selector != nil {
if tc.metric.targetUtilization > 0 {
outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetExternalMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, tc.metric.selector, selector)
} else if tc.metric.perPodTargetUtilization > 0 {
outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetExternalPerPodMetricReplicas(tc.currentReplicas, tc.metric.perPodTargetUtilization, tc.metric.name, testNamespace, tc.metric.selector)
}
} else {
outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, selector)
}
if tc.expectedError != nil {
require.Error(t, err, "there should be an error calculating the replica count")
assert.Contains(t, err.Error(), tc.expectedError.Error(), "the error message should have contained the expected error message")
return
}
require.NoError(t, err, "there should not have been an error calculating the replica count")
assert.Equal(t, tc.expectedReplicas, outReplicas, "replicas should be as expected")
assert.Equal(t, tc.metric.expectedUtilization, outUtilization, "utilization should be as expected")
assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")
return
}
var outReplicas int32
var outUtilization int64
var outTimestamp time.Time
switch tc.metric.metricType {
case objectMetric:
if tc.metric.singleObject == nil {
t.Fatal("Metric specified as objectMetric but metric.singleObject is nil.")
}
outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetObjectMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, tc.metric.singleObject, selector, nil)
case externalMetric:
if tc.metric.selector == nil {
t.Fatal("Metric specified as externalMetric but metric.selector is nil.")
}
if tc.metric.targetUtilization <= 0 {
t.Fatalf("Metric specified as externalMetric but metric.targetUtilization is %d which is <=0.", tc.metric.targetUtilization)
}
outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetExternalMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, tc.metric.selector, selector)
case externalPerPodMetric:
if tc.metric.selector == nil {
t.Fatal("Metric specified as externalPerPodMetric but metric.selector is nil.")
}
if tc.metric.perPodTargetUtilization <= 0 {
t.Fatalf("Metric specified as externalPerPodMetric but metric.perPodTargetUtilization is %d which is <=0.", tc.metric.perPodTargetUtilization)
}
outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetExternalPerPodMetricReplicas(tc.currentReplicas, tc.metric.perPodTargetUtilization, tc.metric.name, testNamespace, tc.metric.selector)
case podMetric:
outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, selector, nil)
default:
t.Fatalf("Unknown metric type: %d", tc.metric.metricType)
}
if tc.expectedError != nil {
require.Error(t, err, "there should be an error calculating the replica count")
assert.Contains(t, err.Error(), tc.expectedError.Error(), "the error message should have contained the expected error message")
return
}
require.NoError(t, err, "there should not have been an error calculating the replica count")
assert.Equal(t, tc.expectedReplicas, outReplicas, "replicas should be as expected")
assert.Equal(t, tc.metric.expectedUtilization, outUtilization, "utilization should be as expected")
assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")
}
func TestReplicaCalcDisjointResourcesMetrics(t *testing.T) {
@@ -398,6 +469,24 @@ func TestReplicaCalcScaleUpUnreadyLessScale(t *testing.T) {
tc.runTest(t)
}
func TestReplicaCalcScaleUpHotCpuLessScale(t *testing.T) {
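	// Editor's note (worked example, assuming hot-CPU pods are handled like unready ones):
	// the first pod started recently, so its CPU sample is dropped; the remaining pods run at
	// (500+700)/(2*1000) = 60% of request vs. a 30% target. Re-checking with the dropped pod
	// counted at zero usage gives 1200/3000 = 40%, ratio 40/30, and ceil(40/30*3) = 4 replicas.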
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 4,
podStartTime: []metav1.Time{hotCpuCreationTime(), coolCpuCreationTime(), coolCpuCreationTime()},
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{300, 500, 700},
targetUtilization: 30,
expectedUtilization: 60,
expectedValue: numContainersPerPod * 600,
},
}
tc.runTest(t)
}
func TestReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
@@ -416,6 +505,25 @@ func TestReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
tc.runTest(t)
}
func TestReplicaCalcScaleHotCpuNoScale(t *testing.T) {
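	// Editor's note (worked example): only the first pod is ready and past its CPU warm-up,
	// so utilization is 400/1000 = 40% vs. a 30% target. Assuming the calculator re-checks the
	// ratio with the two hot/unready pods counted at zero usage, 400/3000 ≈ 13% flips the
	// direction of the decision, so the count stays at 3.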
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 3,
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
podStartTime: []metav1.Time{coolCpuCreationTime(), hotCpuCreationTime(), hotCpuCreationTime()},
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{400, 500, 700},
targetUtilization: 30,
expectedUtilization: 40,
expectedValue: numContainersPerPod * 400,
},
}
tc.runTest(t)
}
func TestReplicaCalcScaleUpIgnoresFailedPods(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 2,
@@ -435,6 +543,26 @@ func TestReplicaCalcScaleUpIgnoresFailedPods(t *testing.T) {
tc.runTest(t)
}
func TestReplicaCalcScaleUpIgnoresDeletionPods(t *testing.T) {
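	// Editor's note (worked example): the expected values imply the two pods carrying a
	// deletion timestamp are skipped entirely, leaving (500+700)/(2*1000) = 60% vs. a 30%
	// target and ceil(60/30*2) = 4 replicas.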
tc := replicaCalcTestCase{
currentReplicas: 2,
expectedReplicas: 4,
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
podPhase: []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning},
podDeletionTimestamp: []bool{false, false, true, true},
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{500, 700},
targetUtilization: 30,
expectedUtilization: 60,
expectedValue: numContainersPerPod * 600,
},
}
tc.runTest(t)
}
func TestReplicaCalcScaleUpCM(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
@@ -444,36 +572,41 @@ func TestReplicaCalcScaleUpCM(t *testing.T) {
levels: []int64{20000, 10000, 30000},
targetUtilization: 15000,
expectedUtilization: 20000,
metricType: podMetric,
},
}
tc.runTest(t)
}
func TestReplicaCalcScaleUpCMUnreadyLessScale(t *testing.T) {
func TestReplicaCalcScaleUpCMUnreadyHotCpuNoLessScale(t *testing.T) {
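	// Editor's note (worked example, assuming readiness/CPU warm-up gating applies only to the
	// CPU resource): for this custom pods metric all three samples count, so the average is
	// (50000+10000+30000)/3 = 30000 vs. a 15000 target and ceil(2*3) = 6 replicas.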
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 4,
expectedReplicas: 6,
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
podStartTime: []metav1.Time{coolCpuCreationTime(), coolCpuCreationTime(), hotCpuCreationTime()},
metric: &metricInfo{
name: "qps",
levels: []int64{50000, 10000, 30000},
targetUtilization: 15000,
expectedUtilization: 30000,
metricType: podMetric,
},
}
tc.runTest(t)
}
func TestReplicaCalcScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
func TestReplicaCalcScaleUpCMUnreadyHotCpuScaleWouldScaleDown(t *testing.T) {
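	// Editor's note (worked example): as above, all three samples count, giving
	// (50000+15000+30000)/3 = 31666 vs. a 15000 target, a ratio of about 2.11, and
	// ceil(2.11*3) = 7 replicas; the earlier expectation of 3 assumed unready pods held the
	// scale-up back.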
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 3,
expectedReplicas: 7,
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
podStartTime: []metav1.Time{hotCpuCreationTime(), coolCpuCreationTime(), hotCpuCreationTime()},
metric: &metricInfo{
name: "qps",
levels: []int64{50000, 15000, 30000},
targetUtilization: 15000,
expectedUtilization: 15000,
expectedUtilization: 31666,
metricType: podMetric,
},
}
tc.runTest(t)
@@ -528,6 +661,7 @@ func TestReplicaCalcScaleUpCMExternal(t *testing.T) {
targetUtilization: 4400,
expectedUtilization: 8600,
selector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
metricType: podMetric,
},
}
tc.runTest(t)
@@ -544,6 +678,7 @@ func TestReplicaCalcScaleUpCMExternalIgnoresUnreadyPods(t *testing.T) {
targetUtilization: 4400,
expectedUtilization: 8600,
selector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
metricType: externalMetric,
},
}
tc.runTest(t)
@@ -558,6 +693,7 @@ func TestReplicaCalcScaleUpCMExternalNoLabels(t *testing.T) {
levels: []int64{8600},
targetUtilization: 4400,
expectedUtilization: 8600,
metricType: podMetric,
},
}
tc.runTest(t)
@@ -573,6 +709,7 @@ func TestReplicaCalcScaleUpPerPodCMExternal(t *testing.T) {
perPodTargetUtilization: 2150,
expectedUtilization: 2867,
selector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
metricType: externalPerPodMetric,
},
}
tc.runTest(t)
@@ -604,6 +741,7 @@ func TestReplicaCalcScaleDownCM(t *testing.T) {
levels: []int64{12000, 12000, 12000, 12000, 12000},
targetUtilization: 20000,
expectedUtilization: 12000,
metricType: podMetric,
},
}
tc.runTest(t)
@@ -638,6 +776,7 @@ func TestReplicaCalcScaleDownCMExternal(t *testing.T) {
targetUtilization: 14334,
expectedUtilization: 8600,
selector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
metricType: externalMetric,
},
}
tc.runTest(t)
@@ -653,12 +792,13 @@ func TestReplicaCalcScaleDownPerPodCMExternal(t *testing.T) {
perPodTargetUtilization: 2867,
expectedUtilization: 1720,
selector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
metricType: externalPerPodMetric,
},
}
tc.runTest(t)
}
func TestReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) {
func TestReplicaCalcScaleDownIncludeUnreadyPods(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 5,
expectedReplicas: 2,
@@ -676,6 +816,24 @@ func TestReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) {
tc.runTest(t)
}
func TestReplicaCalcScaleDownIgnoreHotCpuPods(t *testing.T) {
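	// Editor's note (worked example): the two hot-CPU pods are left out, and since the result
	// is a scale-down they are (by assumption) not added back at zero usage:
	// (100+300+500)/(3*1000) = 30% vs. a 50% target, so ceil(30/50*3) = 2 replicas.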
tc := replicaCalcTestCase{
currentReplicas: 5,
expectedReplicas: 2,
podStartTime: []metav1.Time{coolCpuCreationTime(), coolCpuCreationTime(), coolCpuCreationTime(), hotCpuCreationTime(), hotCpuCreationTime()},
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 300, 500, 250, 250},
targetUtilization: 50,
expectedUtilization: 30,
expectedValue: numContainersPerPod * 300,
},
}
tc.runTest(t)
}
func TestReplicaCalcScaleDownIgnoresFailedPods(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 5,
@@ -695,6 +853,26 @@ func TestReplicaCalcScaleDownIgnoresFailedPods(t *testing.T) {
tc.runTest(t)
}
func TestReplicaCalcScaleDownIgnoresDeletionPods(t *testing.T) {
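	// Editor's note (worked example): the two pods marked for deletion are skipped, leaving
	// five samples: (100+300+500+250+250)/(5*1000) = 28% vs. a 50% target, so
	// ceil(28/50*5) = 3 replicas.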
tc := replicaCalcTestCase{
currentReplicas: 5,
expectedReplicas: 3,
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
podPhase: []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning},
podDeletionTimestamp: []bool{false, false, false, false, false, true, true},
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 300, 500, 250, 250},
targetUtilization: 50,
expectedUtilization: 28,
expectedValue: numContainersPerPod * 280,
},
}
tc.runTest(t)
}
func TestReplicaCalcTolerance(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
@@ -721,6 +899,7 @@ func TestReplicaCalcToleranceCM(t *testing.T) {
levels: []int64{20000, 21000, 21000},
targetUtilization: 20000,
expectedUtilization: 20666,
metricType: podMetric,
},
}
tc.runTest(t)
@@ -755,6 +934,7 @@ func TestReplicaCalcToleranceCMExternal(t *testing.T) {
targetUtilization: 8888,
expectedUtilization: 8600,
selector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
metricType: externalMetric,
},
}
tc.runTest(t)
@@ -770,6 +950,7 @@ func TestReplicaCalcTolerancePerPodCMExternal(t *testing.T) {
perPodTargetUtilization: 2900,
expectedUtilization: 2867,
selector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
metricType: externalPerPodMetric,
},
}
tc.runTest(t)
@@ -889,7 +1070,7 @@ func TestReplicaCalcMissingMetricsNoChangeLt(t *testing.T) {
tc.runTest(t)
}
func TestReplicaCalcMissingMetricsUnreadyNoChange(t *testing.T) {
func TestReplicaCalcMissingMetricsUnreadyChange(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 3,
@@ -907,6 +1088,24 @@ func TestReplicaCalcMissingMetricsUnreadyNoChange(t *testing.T) {
tc.runTest(t)
}
func TestReplicaCalcMissingMetricsHotCpuNoChange(t *testing.T) {
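	// Editor's note (worked example): the hot-CPU first pod is dropped and the third pod has no
	// sample, so measured utilization is 450/1000 = 45% vs. a 50% target. Assuming the missing
	// pod is then presumed to be at full request for the scale-down re-check, the recomputed
	// ratio points the other way, so the count stays at 3.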
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 3,
podStartTime: []metav1.Time{hotCpuCreationTime(), coolCpuCreationTime(), coolCpuCreationTime()},
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 450},
targetUtilization: 50,
expectedUtilization: 45,
expectedValue: numContainersPerPod * 450,
},
}
tc.runTest(t)
}
func TestReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
@@ -925,6 +1124,25 @@ func TestReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) {
tc.runTest(t)
}
func TestReplicaCalcMissingMetricsHotCpuScaleUp(t *testing.T) {
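	// Editor's note (worked example): the unready/hot first pod is dropped and the third pod has
	// no sample, so measured utilization is 2000/1000 = 200% vs. a 50% target. Assuming both are
	// then presumed idle for the scale-up re-check, 2000/3000 ≈ 67% of request gives a ratio of
	// about 1.33 and ceil(1.33*3) = 4 replicas.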
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 4,
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
podStartTime: []metav1.Time{hotCpuCreationTime(), coolCpuCreationTime(), coolCpuCreationTime()},
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 2000},
targetUtilization: 50,
expectedUtilization: 200,
expectedValue: numContainersPerPod * 2000,
},
}
tc.runTest(t)
}
func TestReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 4,
@@ -1015,4 +1233,336 @@ func TestReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {
tc.runTest(t)
}
func TestGroupPods(t *testing.T) {
tests := []struct {
name string
pods []*v1.Pod
metrics metricsclient.PodMetricsInfo
resource v1.ResourceName
expectReadyPodCount int
expectIgnoredPods sets.String
expectMissingPods sets.String
}{
{
"void",
[]*v1.Pod{},
metricsclient.PodMetricsInfo{},
v1.ResourceCPU,
0,
sets.NewString(),
sets.NewString(),
},
{
"count in a ready pod - memory",
[]*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "bentham",
},
Status: v1.PodStatus{
Phase: v1.PodSucceeded,
},
},
},
metricsclient.PodMetricsInfo{
"bentham": metricsclient.PodMetric{Value: 1, Timestamp: time.Now(), Window: time.Minute},
},
v1.ResourceMemory,
1,
sets.NewString(),
sets.NewString(),
},
{
"ignore a pod without ready condition - CPU",
[]*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "lucretius",
},
Status: v1.PodStatus{
Phase: v1.PodSucceeded,
StartTime: &metav1.Time{
Time: time.Now(),
},
},
},
},
metricsclient.PodMetricsInfo{
"lucretius": metricsclient.PodMetric{Value: 1},
},
v1.ResourceCPU,
0,
sets.NewString("lucretius"),
sets.NewString(),
},
{
"count in a ready pod with fresh metrics during initialization period - CPU",
[]*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "bentham",
},
Status: v1.PodStatus{
Phase: v1.PodSucceeded,
StartTime: &metav1.Time{
Time: time.Now().Add(-1 * time.Minute),
},
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
LastTransitionTime: metav1.Time{Time: time.Now().Add(-30 * time.Second)},
Status: v1.ConditionTrue,
},
},
},
},
},
metricsclient.PodMetricsInfo{
"bentham": metricsclient.PodMetric{Value: 1, Timestamp: time.Now(), Window: 30 * time.Second},
},
v1.ResourceCPU,
1,
sets.NewString(),
sets.NewString(),
},
{
"ignore a ready pod without fresh metrics during initialization period - CPU",
[]*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "bentham",
},
Status: v1.PodStatus{
Phase: v1.PodSucceeded,
StartTime: &metav1.Time{
Time: time.Now().Add(-1 * time.Minute),
},
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
LastTransitionTime: metav1.Time{Time: time.Now().Add(-30 * time.Second)},
Status: v1.ConditionTrue,
},
},
},
},
},
metricsclient.PodMetricsInfo{
"bentham": metricsclient.PodMetric{Value: 1, Timestamp: time.Now(), Window: 60 * time.Second},
},
v1.ResourceCPU,
0,
sets.NewString("bentham"),
sets.NewString(),
},
{
"ignore an unready pod during initialization period - CPU",
[]*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "lucretius",
},
Status: v1.PodStatus{
Phase: v1.PodSucceeded,
StartTime: &metav1.Time{
Time: time.Now().Add(-10 * time.Minute),
},
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
LastTransitionTime: metav1.Time{Time: time.Now().Add(-9*time.Minute - 54*time.Second)},
Status: v1.ConditionFalse,
},
},
},
},
},
metricsclient.PodMetricsInfo{
"lucretius": metricsclient.PodMetric{Value: 1},
},
v1.ResourceCPU,
0,
sets.NewString("lucretius"),
sets.NewString(),
},
{
"count in a ready pod without fresh metrics after initialization period - CPU",
[]*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "bentham",
},
Status: v1.PodStatus{
Phase: v1.PodSucceeded,
StartTime: &metav1.Time{
Time: time.Now().Add(-3 * time.Minute),
},
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
LastTransitionTime: metav1.Time{Time: time.Now().Add(-3 * time.Minute)},
Status: v1.ConditionTrue,
},
},
},
},
},
metricsclient.PodMetricsInfo{
"bentham": metricsclient.PodMetric{Value: 1, Timestamp: time.Now().Add(-2 * time.Minute), Window: time.Minute},
},
v1.ResourceCPU,
1,
sets.NewString(),
sets.NewString(),
},
{
"count in an unready pod that was ready after initialization period - CPU",
[]*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "lucretius",
},
Status: v1.PodStatus{
Phase: v1.PodSucceeded,
StartTime: &metav1.Time{
Time: time.Now().Add(-10 * time.Minute),
},
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
LastTransitionTime: metav1.Time{Time: time.Now().Add(-9 * time.Minute)},
Status: v1.ConditionFalse,
},
},
},
},
},
metricsclient.PodMetricsInfo{
"lucretius": metricsclient.PodMetric{Value: 1},
},
v1.ResourceCPU,
1,
sets.NewString(),
sets.NewString(),
},
{
"ignore pod that has never been ready after initialization period - CPU",
[]*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "lucretius",
},
Status: v1.PodStatus{
Phase: v1.PodSucceeded,
StartTime: &metav1.Time{
Time: time.Now().Add(-10 * time.Minute),
},
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
LastTransitionTime: metav1.Time{Time: time.Now().Add(-9*time.Minute - 50*time.Second)},
Status: v1.ConditionFalse,
},
},
},
},
},
metricsclient.PodMetricsInfo{
"lucretius": metricsclient.PodMetric{Value: 1},
},
v1.ResourceCPU,
1,
sets.NewString(),
sets.NewString(),
},
{
"a missing pod",
[]*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "epicurus",
},
Status: v1.PodStatus{
Phase: v1.PodSucceeded,
StartTime: &metav1.Time{
Time: time.Now().Add(-3 * time.Minute),
},
},
},
},
metricsclient.PodMetricsInfo{},
v1.ResourceCPU,
0,
sets.NewString(),
sets.NewString("epicurus"),
},
{
"several pods",
[]*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "lucretius",
},
Status: v1.PodStatus{
Phase: v1.PodSucceeded,
StartTime: &metav1.Time{
Time: time.Now(),
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "niccolo",
},
Status: v1.PodStatus{
Phase: v1.PodSucceeded,
StartTime: &metav1.Time{
Time: time.Now().Add(-3 * time.Minute),
},
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
LastTransitionTime: metav1.Time{Time: time.Now().Add(-3 * time.Minute)},
Status: v1.ConditionTrue,
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "epicurus",
},
Status: v1.PodStatus{
Phase: v1.PodSucceeded,
StartTime: &metav1.Time{
Time: time.Now().Add(-3 * time.Minute),
},
},
},
},
metricsclient.PodMetricsInfo{
"lucretius": metricsclient.PodMetric{Value: 1},
"niccolo": metricsclient.PodMetric{Value: 1},
},
v1.ResourceCPU,
1,
sets.NewString("lucretius"),
sets.NewString("epicurus"),
},
}
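	// Editor's note: these cases assume groupPods splits pods into a ready count, a set of
	// ignored pods (pending pods and, for CPU, unready or freshly started pods without a usable
	// post-startup sample), and a set of pods with no metric at all; the startup and readiness
	// windows come from the defaultTesting* values passed below.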
for _, tc := range tests {
readyPodCount, ignoredPods, missingPods := groupPods(tc.pods, tc.metrics, tc.resource, defaultTestingCpuInitializationPeriod, defaultTestingDelayOfInitialReadinessStatus)
if readyPodCount != tc.expectReadyPodCount {
t.Errorf("%s got readyPodCount %d, expected %d", tc.name, readyPodCount, tc.expectReadyPodCount)
}
if !ignoredPods.Equal(tc.expectIgnoredPods) {
t.Errorf("%s got unreadyPods %v, expected %v", tc.name, ignoredPods, tc.expectIgnoredPods)
}
if !missingPods.Equal(tc.expectMissingPods) {
t.Errorf("%s got missingPods %v, expected %v", tc.name, missingPods, tc.expectMissingPods)
}
}
}
// TODO: add more tests