mirror of https://github.com/ceph/ceph-csi.git

Fresh dep ensure
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/BUILD (generated, vendored): 72 changed lines
@@ -1,10 +1,4 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
@@ -15,21 +9,22 @@ go_library(
        "utilization.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/api/autoscaling/v2beta2:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta2:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/client/custom_metrics:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/client/external_metrics:go_default_library",
        "//vendor/k8s.io/heapster/metrics/api/v1/types:go_default_library",
        "//vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1:go_default_library",
        "//vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library",
        "//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1:go_default_library",
        "//vendor/k8s.io/metrics/pkg/client/custom_metrics:go_default_library",
        "//vendor/k8s.io/metrics/pkg/client/external_metrics:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

@@ -44,26 +39,26 @@ go_test(
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/apis/extensions/install:go_default_library",
        "//staging/src/k8s.io/api/autoscaling/v2beta2:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/testing:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta2:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/apis/external_metrics/v1beta1:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/fake:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/client/custom_metrics/fake:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/client/external_metrics/fake:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/testing:go_default_library",
        "//vendor/k8s.io/heapster/metrics/api/v1/types:go_default_library",
        "//vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1:go_default_library",
        "//vendor/k8s.io/metrics/pkg/apis/external_metrics/v1beta1:go_default_library",
        "//vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library",
        "//vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1:go_default_library",
        "//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake:go_default_library",
        "//vendor/k8s.io/metrics/pkg/client/custom_metrics/fake:go_default_library",
        "//vendor/k8s.io/metrics/pkg/client/external_metrics/fake:go_default_library",
    ],
)

@@ -78,4 +73,5 @@ filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/interfaces.go (generated, vendored): 18 changed lines
@@ -19,14 +19,20 @@ package metrics
import (
    "time"

    autoscaling "k8s.io/api/autoscaling/v2beta1"
    autoscaling "k8s.io/api/autoscaling/v2beta2"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/labels"
)

// PodMetricsInfo contains pod metric values as a map from pod names to
// metric values (the metric values are expected to be the metric as a milli-value)
type PodMetricsInfo map[string]int64
// PodMetric contains pod metric value (the metric values are expected to be the metric as a milli-value)
type PodMetric struct {
    Timestamp time.Time
    Window    time.Duration
    Value     int64
}

// PodMetricsInfo contains pod metrics as a map from pod names to PodMetricsInfo
type PodMetricsInfo map[string]PodMetric

// MetricsClient knows how to query a remote interface to retrieve container-level
// resource metrics as well as pod-level arbitrary metrics
@@ -37,11 +43,11 @@ type MetricsClient interface {

    // GetRawMetric gets the given metric (and an associated oldest timestamp)
    // for all pods matching the specified selector in the given namespace
    GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error)
    GetRawMetric(metricName string, namespace string, selector labels.Selector, metricSelector labels.Selector) (PodMetricsInfo, time.Time, error)

    // GetObjectMetric gets the given metric (and an associated timestamp) for the given
    // object in the given namespace
    GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference) (int64, time.Time, error)
    GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference, metricSelector labels.Selector) (int64, time.Time, error)

    // GetExternalMetric gets all the values of a given external metric
    // that match the specified selector.
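For orientation, here is a minimal sketch of how the reworked types above fit together. It copies the PodMetric and PodMetricsInfo definitions from the hunk rather than importing the vendored package, so the surrounding program (package name, main, sample values) is purely illustrative.

package main

import (
    "fmt"
    "time"
)

// Mirrors the definitions added in interfaces.go above, copied here so the
// example is self-contained; the real types live in the vendored package.
type PodMetric struct {
    Timestamp time.Time
    Window    time.Duration
    Value     int64 // metric as a milli-value, e.g. 5000 == 5 CPU cores
}

type PodMetricsInfo map[string]PodMetric

func main() {
    now := time.Now()
    info := PodMetricsInfo{
        "test-pod-0": {Value: 5000, Timestamp: now, Window: 30 * time.Second},
        "test-pod-1": {Value: 5000, Timestamp: now, Window: 30 * time.Second},
    }
    // Each entry now carries its own timestamp and sampling window, not just a value.
    for pod, m := range info {
        fmt.Printf("%s: %d (window %s)\n", pod, m.Value, m.Window)
    }
}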
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/legacy_metrics_client.go (generated, vendored): 36 changed lines
@@ -22,11 +22,11 @@ import (
    "strings"
    "time"

    "github.com/golang/glog"
    heapster "k8s.io/heapster/metrics/api/v1/types"
    "k8s.io/klog"
    metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1"

    autoscaling "k8s.io/api/autoscaling/v2beta1"
    autoscaling "k8s.io/api/autoscaling/v2beta2"
    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
@@ -35,10 +35,11 @@ import (
)

const (
    DefaultHeapsterNamespace = "kube-system"
    DefaultHeapsterScheme    = "http"
    DefaultHeapsterService   = "heapster"
    DefaultHeapsterPort      = "" // use the first exposed port on the service
    DefaultHeapsterNamespace = "kube-system"
    DefaultHeapsterScheme    = "http"
    DefaultHeapsterService   = "heapster"
    DefaultHeapsterPort      = "" // use the first exposed port on the service
    heapsterDefaultMetricWindow = time.Minute
)

var heapsterQueryStart = -5 * time.Minute
@@ -72,7 +73,7 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, name
        return nil, time.Time{}, fmt.Errorf("failed to get pod resource metrics: %v", err)
    }

    glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))
    klog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))

    metrics := metricsapi.PodMetricsList{}
    err = json.Unmarshal(resultRaw, &metrics)
@@ -93,14 +94,18 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, name
            resValue, found := c.Usage[v1.ResourceName(resource)]
            if !found {
                missing = true
                glog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
                klog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
                continue
            }
            podSum += resValue.MilliValue()
        }

        if !missing {
            res[m.Name] = int64(podSum)
            res[m.Name] = PodMetric{
                Timestamp: m.Timestamp.Time,
                Window:    m.Window.Duration,
                Value:     int64(podSum),
            }
        }
    }

@@ -109,7 +114,7 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, name
    return res, timestamp, nil
}

func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector, metricSelector labels.Selector) (PodMetricsInfo, time.Time, error) {
    podList, err := h.podsGetter.Pods(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("failed to get pod list while fetching metrics: %v", err)
@@ -145,7 +150,7 @@ func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string
        return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
    }

    glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))
    klog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))

    if len(metrics.Items) != len(podNames) {
        // if we get too many metrics or two few metrics, we have no way of knowing which metric goes to which pod
@@ -159,7 +164,12 @@ func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string
    for i, podMetrics := range metrics.Items {
        val, podTimestamp, hadMetrics := collapseTimeSamples(podMetrics, time.Minute)
        if hadMetrics {
            res[podNames[i]] = val
            res[podNames[i]] = PodMetric{
                Timestamp: podTimestamp,
                Window:    heapsterDefaultMetricWindow,
                Value:     int64(val),
            }

            if timestamp == nil || podTimestamp.Before(*timestamp) {
                timestamp = &podTimestamp
            }
@@ -173,7 +183,7 @@ func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string
    return res, *timestamp, nil
}

func (h *HeapsterMetricsClient) GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference) (int64, time.Time, error) {
func (h *HeapsterMetricsClient) GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference, metricSelector labels.Selector) (int64, time.Time, error) {
    return 0, time.Time{}, fmt.Errorf("object metrics are not yet supported")
}
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/legacy_metrics_client_test.go (generated, vendored)
@@ -68,13 +68,15 @@ type testCase struct {

    replicas              int
    targetTimestamp       int
    window                time.Duration
    reportedMetricsPoints [][]metricPoint
    reportedPodMetrics    [][]int64

    namespace    string
    selector     labels.Selector
    resourceName v1.ResourceName
    metricName   string
    namespace      string
    selector       labels.Selector
    metricSelector labels.Selector
    resourceName   v1.ResourceName
    metricName     string
}

func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
@@ -108,7 +110,8 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
                Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
                Namespace: namespace,
            },
            Timestamp: metav1.Time{Time: fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)},
            Timestamp: metav1.Time{Time: offsetTimestampBy(tc.targetTimestamp)},
            Window:    metav1.Duration{Duration: tc.window},
            Containers: []metricsapi.ContainerMetrics{},
        }
        for j, cpu := range containers {
@@ -137,7 +140,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
    for _, reportedMetricPoints := range tc.reportedMetricsPoints {
        var heapsterMetricPoints []heapster.MetricPoint
        for _, reportedMetricPoint := range reportedMetricPoints {
            timestamp := fixedTimestamp.Add(time.Duration(reportedMetricPoint.timestamp) * time.Minute)
            timestamp := offsetTimestampBy(reportedMetricPoint.timestamp)
            if latestTimestamp.Before(timestamp) {
                latestTimestamp = timestamp
            }
@@ -196,10 +199,20 @@ func (tc *testCase) verifyResults(t *testing.T, metrics PodMetricsInfo, timestam
    }
    assert.NoError(t, err, "there should be no error retrieving the metrics")
    assert.NotNil(t, metrics, "there should be metrics returned")
    if len(metrics) != len(tc.desiredMetricValues) {
        t.Errorf("Not equal:\nexpected: %v\nactual: %v", tc.desiredMetricValues, metrics)
    } else {
        for k, m := range metrics {
            if !m.Timestamp.Equal(tc.desiredMetricValues[k].Timestamp) ||
                m.Window != tc.desiredMetricValues[k].Window ||
                m.Value != tc.desiredMetricValues[k].Value {
                t.Errorf("Not equal:\nexpected: %v\nactual: %v", tc.desiredMetricValues, metrics)
                break
            }
        }
    }

    assert.Equal(t, tc.desiredMetricValues, metrics, "the metrics values should be as expected")

    targetTimestamp := fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)
    targetTimestamp := offsetTimestampBy(tc.targetTimestamp)
    assert.True(t, targetTimestamp.Equal(timestamp), fmt.Sprintf("the timestamp should be as expected (%s) but was %s", targetTimestamp, timestamp))
}

@@ -211,71 +224,92 @@ func (tc *testCase) runTest(t *testing.T) {
        info, timestamp, err := metricsClient.GetResourceMetric(tc.resourceName, tc.namespace, tc.selector)
        tc.verifyResults(t, info, timestamp, err)
    } else {
        info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector)
        info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector, tc.metricSelector)
        tc.verifyResults(t, info, timestamp, err)
    }
}

func TestCPU(t *testing.T) {
    targetTimestamp := 1
    window := 30 * time.Second
    tc := testCase{
        replicas: 3,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
            "test-pod-0": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-1": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-2": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
        },
        resourceName:       v1.ResourceCPU,
        targetTimestamp:    1,
        targetTimestamp:    targetTimestamp,
        window:             window,
        reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}},
    }
    tc.runTest(t)
}

func TestQPS(t *testing.T) {
    targetTimestamp := 1
    tc := testCase{
        replicas: 3,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 10000, "test-pod-1": 20000, "test-pod-2": 10000,
            "test-pod-0": PodMetric{Value: 10000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
            "test-pod-1": PodMetric{Value: 20000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
            "test-pod-2": PodMetric{Value: 10000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
        },
        metricName:            "qps",
        targetTimestamp:       1,
        targetTimestamp:       targetTimestamp,
        reportedMetricsPoints: [][]metricPoint{{{10, 1}}, {{20, 1}}, {{10, 1}}},
    }
    tc.runTest(t)
}

func TestQpsSumEqualZero(t *testing.T) {
    targetTimestamp := 0
    tc := testCase{
        replicas: 3,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 0, "test-pod-1": 0, "test-pod-2": 0,
            "test-pod-0": PodMetric{Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
            "test-pod-1": PodMetric{Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
            "test-pod-2": PodMetric{Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
        },
        metricName:            "qps",
        targetTimestamp:       0,
        targetTimestamp:       targetTimestamp,
        reportedMetricsPoints: [][]metricPoint{{{0, 0}}, {{0, 0}}, {{0, 0}}},
    }
    tc.runTest(t)
}

func TestCPUMoreMetrics(t *testing.T) {
    targetTimestamp := 10
    window := 30 * time.Second
    tc := testCase{
        replicas: 5,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
            "test-pod-3": 5000, "test-pod-4": 5000,
            "test-pod-0": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-1": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-2": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-3": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-4": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
        },
        resourceName:       v1.ResourceCPU,
        targetTimestamp:    10,
        targetTimestamp:    targetTimestamp,
        window:             window,
        reportedPodMetrics: [][]int64{{1000, 2000, 2000}, {5000}, {1000, 1000, 1000, 2000}, {4000, 1000}, {5000}},
    }
    tc.runTest(t)
}

func TestCPUMissingMetrics(t *testing.T) {
    targetTimestamp := 0
    window := 30 * time.Second
    tc := testCase{
        replicas: 3,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 4000,
            "test-pod-0": PodMetric{Value: 4000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
        },
        resourceName:       v1.ResourceCPU,
        targetTimestamp:    targetTimestamp,
        window:             window,
        reportedPodMetrics: [][]int64{{4000}},
    }
    tc.runTest(t)
@@ -314,13 +348,15 @@ func TestCPUEmptyMetrics(t *testing.T) {
}

func TestQpsEmptyEntries(t *testing.T) {
    targetTimestamp := 4
    tc := testCase{
        replicas:   3,
        metricName: "qps",
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 4000000, "test-pod-2": 2000000,
            "test-pod-0": PodMetric{Value: 4000000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
            "test-pod-2": PodMetric{Value: 2000000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
        },
        targetTimestamp:       4,
        targetTimestamp:       targetTimestamp,
        reportedMetricsPoints: [][]metricPoint{{{4000, 4}}, {}, {{2000, 4}}},
    }
    tc.runTest(t)
@@ -337,12 +373,17 @@ func TestCPUZeroReplicas(t *testing.T) {
}

func TestCPUEmptyMetricsForOnePod(t *testing.T) {
    targetTimestamp := 0
    window := 30 * time.Second
    tc := testCase{
        replicas:     3,
        resourceName: v1.ResourceCPU,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 100, "test-pod-1": 700,
            "test-pod-0": PodMetric{Value: 100, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-1": PodMetric{Value: 700, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
        },
        targetTimestamp:    targetTimestamp,
        window:             window,
        reportedPodMetrics: [][]int64{{100}, {300, 400}, {}},
    }
    tc.runTest(t)
@@ -363,3 +404,7 @@ func testCollapseTimeSamples(t *testing.T) {
    assert.InEpsilon(t, float64(75), val, 0.1, "collapsed sample value should be as expected")
    assert.True(t, timestamp.Equal(now), "timestamp should be the current time (the newest)")
}

func offsetTimestampBy(t int) time.Time {
    return fixedTimestamp.Add(time.Duration(t) * time.Minute)
}
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/rest_metrics_client.go (generated, vendored): 42 changed lines
@@ -20,19 +20,23 @@ import (
    "fmt"
    "time"

    "github.com/golang/glog"
    "k8s.io/klog"

    autoscaling "k8s.io/api/autoscaling/v2beta1"
    autoscaling "k8s.io/api/autoscaling/v2beta2"
    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime/schema"
    customapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta1"
    resourceclient "k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1"
    customapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2"
    resourceclient "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
    customclient "k8s.io/metrics/pkg/client/custom_metrics"
    externalclient "k8s.io/metrics/pkg/client/external_metrics"
)

const (
    metricServerDefaultMetricWindow = time.Minute
)

func NewRESTMetricsClient(resourceClient resourceclient.PodMetricsesGetter, customClient customclient.CustomMetricsClient, externalClient externalclient.ExternalMetricsClient) MetricsClient {
    return &restMetricsClient{
        &resourceMetricsClient{resourceClient},
@@ -77,14 +81,18 @@ func (c *resourceMetricsClient) GetResourceMetric(resource v1.ResourceName, name
            resValue, found := c.Usage[v1.ResourceName(resource)]
            if !found {
                missing = true
                glog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
                klog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
                break // containers loop
            }
            podSum += resValue.MilliValue()
        }

        if !missing {
            res[m.Name] = int64(podSum)
            res[m.Name] = PodMetric{
                Timestamp: m.Timestamp.Time,
                Window:    m.Window.Duration,
                Value:     int64(podSum),
            }
        }
    }

@@ -101,8 +109,8 @@ type customMetricsClient struct {

// GetRawMetric gets the given metric (and an associated oldest timestamp)
// for all pods matching the specified selector in the given namespace
func (c *customMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
    metrics, err := c.client.NamespacedMetrics(namespace).GetForObjects(schema.GroupKind{Kind: "Pod"}, selector, metricName)
func (c *customMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector, metricSelector labels.Selector) (PodMetricsInfo, time.Time, error) {
    metrics, err := c.client.NamespacedMetrics(namespace).GetForObjects(schema.GroupKind{Kind: "Pod"}, selector, metricName, metricSelector)
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("unable to fetch metrics from custom metrics API: %v", err)
    }
@@ -113,7 +121,17 @@ func (c *customMetricsClient) GetRawMetric(metricName string, namespace string,

    res := make(PodMetricsInfo, len(metrics.Items))
    for _, m := range metrics.Items {
        res[m.DescribedObject.Name] = m.Value.MilliValue()
        window := metricServerDefaultMetricWindow
        if m.WindowSeconds != nil {
            window = time.Duration(*m.WindowSeconds) * time.Second
        }
        res[m.DescribedObject.Name] = PodMetric{
            Timestamp: m.Timestamp.Time,
            Window:    window,
            Value:     int64(m.Value.MilliValue()),
        }

        m.Value.MilliValue()
    }

    timestamp := metrics.Items[0].Timestamp.Time
@@ -123,7 +141,7 @@ func (c *customMetricsClient) GetRawMetric(metricName string, namespace string,

// GetObjectMetric gets the given metric (and an associated timestamp) for the given
// object in the given namespace
func (c *customMetricsClient) GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference) (int64, time.Time, error) {
func (c *customMetricsClient) GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference, metricSelector labels.Selector) (int64, time.Time, error) {
    gvk := schema.FromAPIVersionAndKind(objectRef.APIVersion, objectRef.Kind)
    var metricValue *customapi.MetricValue
    var err error
@@ -131,9 +149,9 @@ func (c *customMetricsClient) GetObjectMetric(metricName string, namespace strin
        // handle namespace separately
        // NB: we ignore namespace name here, since CrossVersionObjectReference isn't
        // supposed to allow you to escape your namespace
        metricValue, err = c.client.RootScopedMetrics().GetForObject(gvk.GroupKind(), namespace, metricName)
        metricValue, err = c.client.RootScopedMetrics().GetForObject(gvk.GroupKind(), namespace, metricName, metricSelector)
    } else {
        metricValue, err = c.client.NamespacedMetrics(namespace).GetForObject(gvk.GroupKind(), objectRef.Name, metricName)
        metricValue, err = c.client.NamespacedMetrics(namespace).GetForObject(gvk.GroupKind(), objectRef.Name, metricName, metricSelector)
    }

    if err != nil {
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/rest_metrics_client_test.go (generated, vendored)
@@ -21,7 +21,7 @@ import (
    "testing"
    "time"

    autoscalingapi "k8s.io/api/autoscaling/v2beta1"
    autoscalingapi "k8s.io/api/autoscaling/v2beta2"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/meta/testrestmapper"
    "k8s.io/apimachinery/pkg/api/resource"
@@ -32,10 +32,10 @@ import (
    core "k8s.io/client-go/testing"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    _ "k8s.io/kubernetes/pkg/apis/extensions/install"
    cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta1"
    cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2"
    emapi "k8s.io/metrics/pkg/apis/external_metrics/v1beta1"
    metricsapi "k8s.io/metrics/pkg/apis/metrics/v1beta1"
    metricsfake "k8s.io/metrics/pkg/client/clientset_generated/clientset/fake"
    metricsfake "k8s.io/metrics/pkg/client/clientset/versioned/fake"
    cmfake "k8s.io/metrics/pkg/client/custom_metrics/fake"
    emfake "k8s.io/metrics/pkg/client/external_metrics/fake"

@@ -48,6 +48,7 @@ type restClientTestCase struct {

    // "timestamps" here are actually the offset in minutes from a base timestamp
    targetTimestamp      int
    window               time.Duration
    reportedMetricPoints []metricPoint
    reportedPodMetrics   [][]int64
    singleObject         *autoscalingapi.CrossVersionObjectReference
@@ -86,7 +87,8 @@ func (tc *restClientTestCase) prepareTestClient(t *testing.T) (*metricsfake.Clie
                Namespace: namespace,
                Labels:    podLabels,
            },
            Timestamp: metav1.Time{Time: fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)},
            Timestamp: metav1.Time{Time: offsetTimestampBy(tc.targetTimestamp)},
            Window:    metav1.Duration{Duration: tc.window},
            Containers: []metricsapi.ContainerMetrics{},
        }
        for j, cpu := range containers {
@@ -115,7 +117,7 @@ func (tc *restClientTestCase) prepareTestClient(t *testing.T) (*metricsfake.Clie

        metrics := emapi.ExternalMetricValueList{}
        for _, metricPoint := range tc.reportedMetricPoints {
            timestamp := fixedTimestamp.Add(time.Duration(metricPoint.timestamp) * time.Minute)
            timestamp := offsetTimestampBy(metricPoint.timestamp)
            metric := emapi.ExternalMetricValue{
                Value:     *resource.NewMilliQuantity(int64(metricPoint.level), resource.DecimalSI),
                Timestamp: metav1.Time{Time: timestamp},
@@ -136,16 +138,18 @@ func (tc *restClientTestCase) prepareTestClient(t *testing.T) (*metricsfake.Clie
        assert.Equal(t, "pods", getForAction.GetResource().Resource, "type of object that we requested multiple metrics for should have been pods")

        for i, metricPoint := range tc.reportedMetricPoints {
            timestamp := fixedTimestamp.Add(time.Duration(metricPoint.timestamp) * time.Minute)
            timestamp := offsetTimestampBy(metricPoint.timestamp)
            metric := cmapi.MetricValue{
                DescribedObject: v1.ObjectReference{
                    Kind:       "Pod",
                    APIVersion: "v1",
                    Name:       fmt.Sprintf("%s-%d", podNamePrefix, i),
                },
                Value:      *resource.NewMilliQuantity(int64(metricPoint.level), resource.DecimalSI),
                Timestamp:  metav1.Time{Time: timestamp},
                MetricName: tc.metricName,
                Value:     *resource.NewMilliQuantity(int64(metricPoint.level), resource.DecimalSI),
                Timestamp: metav1.Time{Time: timestamp},
                Metric: cmapi.MetricIdentifier{
                    Name: tc.metricName,
                },
            }

            metrics.Items = append(metrics.Items, metric)
@@ -166,7 +170,7 @@ func (tc *restClientTestCase) prepareTestClient(t *testing.T) (*metricsfake.Clie
        assert.Equal(t, groupResource.String(), getForAction.GetResource().Resource, "should have requested metrics for the resource matching the GroupKind passed in")
        assert.Equal(t, tc.singleObject.Name, name, "should have requested metrics for the object matching the name passed in")
        metricPoint := tc.reportedMetricPoints[0]
        timestamp := fixedTimestamp.Add(time.Duration(metricPoint.timestamp) * time.Minute)
        timestamp := offsetTimestampBy(metricPoint.timestamp)

        metrics := &cmapi.MetricValueList{
            Items: []cmapi.MetricValue{
@@ -176,9 +180,11 @@ func (tc *restClientTestCase) prepareTestClient(t *testing.T) (*metricsfake.Clie
                        APIVersion: tc.singleObject.APIVersion,
                        Name:       tc.singleObject.Name,
                    },
                    Timestamp:  metav1.Time{Time: timestamp},
                    MetricName: tc.metricName,
                    Value:      *resource.NewMilliQuantity(int64(metricPoint.level), resource.DecimalSI),
                    Timestamp: metav1.Time{Time: timestamp},
                    Metric: cmapi.MetricIdentifier{
                        Name: tc.metricName,
                    },
                    Value: *resource.NewMilliQuantity(int64(metricPoint.level), resource.DecimalSI),
                },
            },
        }
@@ -200,9 +206,20 @@ func (tc *restClientTestCase) verifyResults(t *testing.T, metrics PodMetricsInfo
    assert.NoError(t, err, "there should be no error retrieving the metrics")
    assert.NotNil(t, metrics, "there should be metrics returned")

    assert.Equal(t, tc.desiredMetricValues, metrics, "the metrics values should be as expected")
    if len(metrics) != len(tc.desiredMetricValues) {
        t.Errorf("Not equal:\nexpected: %v\nactual: %v", tc.desiredMetricValues, metrics)
    } else {
        for k, m := range metrics {
            if !m.Timestamp.Equal(tc.desiredMetricValues[k].Timestamp) ||
                m.Window != tc.desiredMetricValues[k].Window ||
                m.Value != tc.desiredMetricValues[k].Value {
                t.Errorf("Not equal:\nexpected: %v\nactual: %v", tc.desiredMetricValues, metrics)
                break
            }
        }
    }

    targetTimestamp := fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)
    targetTimestamp := offsetTimestampBy(tc.targetTimestamp)
    assert.True(t, targetTimestamp.Equal(timestamp), fmt.Sprintf("the timestamp should be as expected (%s) but was %s", targetTimestamp, timestamp))
}

@@ -223,26 +240,31 @@ func (tc *restClientTestCase) runTest(t *testing.T) {
        val, timestamp, err := metricsClient.GetExternalMetric(tc.metricName, tc.namespace, tc.metricLabelSelector)
        info := make(PodMetricsInfo, len(val))
        for i, metricVal := range val {
            info[fmt.Sprintf("%v-val-%v", tc.metricName, i)] = metricVal
            info[fmt.Sprintf("%v-val-%v", tc.metricName, i)] = PodMetric{Value: metricVal}
        }
        tc.verifyResults(t, info, timestamp, err)
    } else if tc.singleObject == nil {
        info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector)
        info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector, tc.metricLabelSelector)
        tc.verifyResults(t, info, timestamp, err)
    } else {
        val, timestamp, err := metricsClient.GetObjectMetric(tc.metricName, tc.namespace, tc.singleObject)
        info := PodMetricsInfo{tc.singleObject.Name: val}
        val, timestamp, err := metricsClient.GetObjectMetric(tc.metricName, tc.namespace, tc.singleObject, tc.metricLabelSelector)
        info := PodMetricsInfo{tc.singleObject.Name: {Value: val}}
        tc.verifyResults(t, info, timestamp, err)
    }
}

func TestRESTClientCPU(t *testing.T) {
    targetTimestamp := 1
    window := 30 * time.Second
    tc := restClientTestCase{
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
            "test-pod-0": {Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-1": {Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-2": {Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
        },
        resourceName:       v1.ResourceCPU,
        targetTimestamp:    1,
        targetTimestamp:    targetTimestamp,
        window:             window,
        reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}},
    }
    tc.runTest(t)
@@ -251,7 +273,7 @@ func TestRESTClientCPU(t *testing.T) {
func TestRESTClientExternal(t *testing.T) {
    tc := restClientTestCase{
        desiredMetricValues: PodMetricsInfo{
            "external-val-0": 10000, "external-val-1": 20000, "external-val-2": 10000,
            "external-val-0": {Value: 10000}, "external-val-1": {Value: 20000}, "external-val-2": {Value: 10000},
        },
        metricSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
        metricName:     "external",
@@ -262,12 +284,15 @@ func TestRESTClientExternal(t *testing.T) {
}

func TestRESTClientQPS(t *testing.T) {
    targetTimestamp := 1
    tc := restClientTestCase{
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 10000, "test-pod-1": 20000, "test-pod-2": 10000,
            "test-pod-0": {Value: 10000, Timestamp: offsetTimestampBy(targetTimestamp), Window: metricServerDefaultMetricWindow},
            "test-pod-1": {Value: 20000, Timestamp: offsetTimestampBy(targetTimestamp), Window: metricServerDefaultMetricWindow},
            "test-pod-2": {Value: 10000, Timestamp: offsetTimestampBy(targetTimestamp), Window: metricServerDefaultMetricWindow},
        },
        metricName:           "qps",
        targetTimestamp:      1,
        targetTimestamp:      targetTimestamp,
        reportedMetricPoints: []metricPoint{{10000, 1}, {20000, 1}, {10000, 1}},
    }
    tc.runTest(t)
@@ -275,7 +300,7 @@ func TestRESTClientQPS(t *testing.T) {

func TestRESTClientSingleObject(t *testing.T) {
    tc := restClientTestCase{
        desiredMetricValues:  PodMetricsInfo{"some-dep": 10},
        desiredMetricValues:  PodMetricsInfo{"some-dep": {Value: 10}},
        metricName:           "queue-length",
        targetTimestamp:      1,
        reportedMetricPoints: []metricPoint{{10, 1}},
@@ -289,12 +314,15 @@ func TestRESTClientSingleObject(t *testing.T) {
}

func TestRESTClientQpsSumEqualZero(t *testing.T) {
    targetTimestamp := 0
    tc := restClientTestCase{
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 0, "test-pod-1": 0, "test-pod-2": 0,
            "test-pod-0": {Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: metricServerDefaultMetricWindow},
            "test-pod-1": {Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: metricServerDefaultMetricWindow},
            "test-pod-2": {Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: metricServerDefaultMetricWindow},
        },
        metricName:           "qps",
        targetTimestamp:      0,
        targetTimestamp:      targetTimestamp,
        reportedMetricPoints: []metricPoint{{0, 0}, {0, 0}, {0, 0}},
    }
    tc.runTest(t)
@@ -303,7 +331,7 @@ func TestRESTClientQpsSumEqualZero(t *testing.T) {
func TestRESTClientExternalSumEqualZero(t *testing.T) {
    tc := restClientTestCase{
        desiredMetricValues: PodMetricsInfo{
            "external-val-0": 0, "external-val-1": 0, "external-val-2": 0,
            "external-val-0": {Value: 0}, "external-val-1": {Value: 0}, "external-val-2": {Value: 0},
        },
        metricSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
        metricName:     "external",
@@ -345,11 +373,16 @@ func TestRESTClientCPUEmptyMetrics(t *testing.T) {
}

func TestRESTClientCPUEmptyMetricsForOnePod(t *testing.T) {
    targetTimestamp := 1
    window := 30 * time.Second
    tc := restClientTestCase{
        resourceName: v1.ResourceCPU,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 100, "test-pod-1": 700,
            "test-pod-0": {Value: 100, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-1": {Value: 700, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
        },
        targetTimestamp:    targetTimestamp,
        window:             window,
        reportedPodMetrics: [][]int64{{100}, {300, 400}, {}},
    }
    tc.runTest(t)
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/utilization.go (generated, vendored): 8 changed lines
@@ -28,14 +28,14 @@ func GetResourceUtilizationRatio(metrics PodMetricsInfo, requests map[string]int
    requestsTotal := int64(0)
    numEntries := 0

    for podName, metricValue := range metrics {
    for podName, metric := range metrics {
        request, hasRequest := requests[podName]
        if !hasRequest {
            // we check for missing requests elsewhere, so assuming missing requests == extraneous metrics
            continue
        }

        metricsTotal += metricValue
        metricsTotal += metric.Value
        requestsTotal += request
        numEntries++
    }
@@ -56,8 +56,8 @@ func GetResourceUtilizationRatio(metrics PodMetricsInfo, requests map[string]int
// (returning that and the actual utilization)
func GetMetricUtilizationRatio(metrics PodMetricsInfo, targetUtilization int64) (utilizationRatio float64, currentUtilization int64) {
    metricsTotal := int64(0)
    for _, metricValue := range metrics {
        metricsTotal += metricValue
    for _, metric := range metrics {
        metricsTotal += metric.Value
    }

    currentUtilization = metricsTotal / int64(len(metrics))
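Worked numbers for GetMetricUtilizationRatio after the change above; the arithmetic matches TestGetMetricUtilizationRatioBaseCase below. The snippet re-implements the two-line computation locally so it runs without the vendored package, which is an assumption of this illustration.

package main

import "fmt"

func main() {
    // Two pods reporting milli-values 5000 and 10000, target utilization 10000,
    // as in TestGetMetricUtilizationRatioBaseCase below.
    values := []int64{5000, 10000}
    target := int64(10000)

    total := int64(0)
    for _, v := range values {
        total += v // the function now sums metric.Value instead of the raw map value
    }
    current := total / int64(len(values))       // 15000 / 2 = 7500
    ratio := float64(current) / float64(target) // 7500 / 10000 = 0.75

    fmt.Println(current, ratio) // 7500 0.75
}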
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/utilization_test.go (generated, vendored): 10 changed lines
@@ -67,7 +67,7 @@ func (tc *metricUtilizationRatioTestCase) runTest(t *testing.T) {
func TestGetResourceUtilizationRatioBaseCase(t *testing.T) {
    tc := resourceUtilizationRatioTestCase{
        metrics: PodMetricsInfo{
            "test-pod-0": 50, "test-pod-1": 76,
            "test-pod-0": {Value: 50}, "test-pod-1": {Value: 76},
        },
        requests: map[string]int64{
            "test-pod-0": 100, "test-pod-1": 100,
@@ -85,7 +85,7 @@ func TestGetResourceUtilizationRatioBaseCase(t *testing.T) {
func TestGetResourceUtilizationRatioIgnorePodsWithNoRequest(t *testing.T) {
    tc := resourceUtilizationRatioTestCase{
        metrics: PodMetricsInfo{
            "test-pod-0": 50, "test-pod-1": 76, "test-pod-no-request": 100,
            "test-pod-0": {Value: 50}, "test-pod-1": {Value: 76}, "test-pod-no-request": {Value: 100},
        },
        requests: map[string]int64{
            "test-pod-0": 100, "test-pod-1": 100,
@@ -103,7 +103,7 @@ func TestGetResourceUtilizationRatioIgnorePodsWithNoRequest(t *testing.T) {
func TestGetResourceUtilizationRatioExtraRequest(t *testing.T) {
    tc := resourceUtilizationRatioTestCase{
        metrics: PodMetricsInfo{
            "test-pod-0": 50, "test-pod-1": 76,
            "test-pod-0": {Value: 50}, "test-pod-1": {Value: 76},
        },
        requests: map[string]int64{
            "test-pod-0": 100, "test-pod-1": 100, "test-pod-extra-request": 500,
@@ -121,7 +121,7 @@ func TestGetResourceUtilizationRatioExtraRequest(t *testing.T) {
func TestGetResourceUtilizationRatioNoRequests(t *testing.T) {
    tc := resourceUtilizationRatioTestCase{
        metrics: PodMetricsInfo{
            "test-pod-0": 50, "test-pod-1": 76,
            "test-pod-0": {Value: 50}, "test-pod-1": {Value: 76},
        },
        requests:          map[string]int64{},
        targetUtilization: 50,
@@ -138,7 +138,7 @@ func TestGetResourceUtilizationRatioNoRequests(t *testing.T) {
func TestGetMetricUtilizationRatioBaseCase(t *testing.T) {
    tc := metricUtilizationRatioTestCase{
        metrics: PodMetricsInfo{
            "test-pod-0": 5000, "test-pod-1": 10000,
            "test-pod-0": {Value: 5000}, "test-pod-1": {Value: 10000},
        },
        targetUtilization:        10000,
        expectedUtilizationRatio: .75,