Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 02:43:36 +00:00
Commit: vendor files
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/BUILD (77 lines added, generated, vendored, normal file)
@@ -0,0 +1,77 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "interfaces.go",
        "legacy_metrics_client.go",
        "rest_metrics_client.go",
        "utilization.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics",
    deps = [
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/heapster/metrics/api/v1/types:go_default_library",
        "//vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1:go_default_library",
        "//vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library",
        "//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1:go_default_library",
        "//vendor/k8s.io/metrics/pkg/client/custom_metrics:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "legacy_metrics_client_test.go",
        "rest_metrics_client_test.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics",
    library = ":go_default_library",
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/apis/extensions/install:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/testing:go_default_library",
        "//vendor/k8s.io/heapster/metrics/api/v1/types:go_default_library",
        "//vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1:go_default_library",
        "//vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library",
        "//vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1:go_default_library",
        "//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake:go_default_library",
        "//vendor/k8s.io/metrics/pkg/client/custom_metrics/fake:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/interfaces.go (45 lines added, generated, vendored, normal file)
@@ -0,0 +1,45 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
    "time"

    autoscaling "k8s.io/api/autoscaling/v2beta1"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/labels"
)

// PodMetricsInfo contains pod metric values as a map from pod names to
// metric values (the metric values are expected to be the metric as a milli-value)
type PodMetricsInfo map[string]int64

// MetricsClient knows how to query a remote interface to retrieve container-level
// resource metrics as well as pod-level arbitrary metrics
type MetricsClient interface {
    // GetResourceMetric gets the given resource metric (and an associated oldest timestamp)
    // for all pods matching the specified selector in the given namespace
    GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error)

    // GetRawMetric gets the given metric (and an associated oldest timestamp)
    // for all pods matching the specified selector in the given namespace
    GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error)

    // GetObjectMetric gets the given metric (and an associated timestamp) for the given
    // object in the given namespace
    GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference) (int64, time.Time, error)
}
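The MetricsClient interface above is the only contract the autoscaler-side code relies on; the Heapster-backed and REST-backed implementations that follow are interchangeable behind it. As a minimal caller sketch, and purely for illustration (the helper name and the example package are assumptions, not part of the vendored code), a consumer could average per-pod CPU milli-values like this, assuming the vendored import path resolves:

package example

import (
    "fmt"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/labels"

    metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
)

// averageCPUMilli is a hypothetical helper: it asks any MetricsClient for the
// per-pod CPU usage (as milli-values) of pods matching sel and returns the mean.
func averageCPUMilli(c metricsclient.MetricsClient, namespace string, sel labels.Selector) (int64, error) {
    info, _, err := c.GetResourceMetric(v1.ResourceCPU, namespace, sel)
    if err != nil {
        return 0, err
    }
    if len(info) == 0 {
        return 0, fmt.Errorf("no pod metrics for selector %q", sel.String())
    }
    var total int64
    for _, milli := range info {
        total += milli
    }
    return total / int64(len(info)), nil
}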
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/legacy_metrics_client.go (213 lines added, generated, vendored, normal file)
@@ -0,0 +1,213 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
    "encoding/json"
    "fmt"
    "strings"
    "time"

    "github.com/golang/glog"
    heapster "k8s.io/heapster/metrics/api/v1/types"
    metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1"

    autoscaling "k8s.io/api/autoscaling/v2beta1"
    "k8s.io/api/core/v1"
    clientgov1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    clientset "k8s.io/client-go/kubernetes"
    v1core "k8s.io/client-go/kubernetes/typed/core/v1"
)

const (
    DefaultHeapsterNamespace = "kube-system"
    DefaultHeapsterScheme    = "http"
    DefaultHeapsterService   = "heapster"
    DefaultHeapsterPort      = "" // use the first exposed port on the service
)

var heapsterQueryStart = -5 * time.Minute

type HeapsterMetricsClient struct {
    services        v1core.ServiceInterface
    podsGetter      v1core.PodsGetter
    heapsterScheme  string
    heapsterService string
    heapsterPort    string
}

func NewHeapsterMetricsClient(client clientset.Interface, namespace, scheme, service, port string) MetricsClient {
    return &HeapsterMetricsClient{
        services:        client.CoreV1().Services(namespace),
        podsGetter:      client.CoreV1(),
        heapsterScheme:  scheme,
        heapsterService: service,
        heapsterPort:    port,
    }
}

func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
    metricPath := fmt.Sprintf("/apis/metrics/v1alpha1/namespaces/%s/pods", namespace)
    params := map[string]string{"labelSelector": selector.String()}

    resultRaw, err := h.services.
        ProxyGet(h.heapsterScheme, h.heapsterService, h.heapsterPort, metricPath, params).
        DoRaw()
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("failed to get pod resource metrics: %v", err)
    }

    glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))

    metrics := metricsapi.PodMetricsList{}
    err = json.Unmarshal(resultRaw, &metrics)
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
    }

    if len(metrics.Items) == 0 {
        return nil, time.Time{}, fmt.Errorf("no metrics returned from heapster")
    }

    res := make(PodMetricsInfo, len(metrics.Items))

    for _, m := range metrics.Items {
        podSum := int64(0)
        missing := len(m.Containers) == 0
        for _, c := range m.Containers {
            resValue, found := c.Usage[clientgov1.ResourceName(resource)]
            if !found {
                missing = true
                glog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
                continue
            }
            podSum += resValue.MilliValue()
        }

        if !missing {
            res[m.Name] = int64(podSum)
        }
    }

    timestamp := metrics.Items[0].Timestamp.Time

    return res, timestamp, nil
}

func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
    podList, err := h.podsGetter.Pods(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("failed to get pod list while fetching metrics: %v", err)
    }

    if len(podList.Items) == 0 {
        return nil, time.Time{}, fmt.Errorf("no pods matched the provided selector")
    }

    podNames := make([]string, len(podList.Items))
    for i, pod := range podList.Items {
        podNames[i] = pod.Name
    }

    now := time.Now()

    startTime := now.Add(heapsterQueryStart)
    metricPath := fmt.Sprintf("/api/v1/model/namespaces/%s/pod-list/%s/metrics/%s",
        namespace,
        strings.Join(podNames, ","),
        metricName)

    resultRaw, err := h.services.
        ProxyGet(h.heapsterScheme, h.heapsterService, h.heapsterPort, metricPath, map[string]string{"start": startTime.Format(time.RFC3339)}).
        DoRaw()
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("failed to get pod metrics: %v", err)
    }

    var metrics heapster.MetricResultList
    err = json.Unmarshal(resultRaw, &metrics)
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
    }

    glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))

    if len(metrics.Items) != len(podNames) {
        // if we get too many metrics or too few metrics, we have no way of knowing which metric goes to which pod
        // (note that Heapster returns *empty* metric items when a pod does not exist or have that metric, so this
        // does not cover the "missing metric entry" case)
        return nil, time.Time{}, fmt.Errorf("requested metrics for %v pods, got metrics for %v", len(podNames), len(metrics.Items))
    }

    var timestamp *time.Time
    res := make(PodMetricsInfo, len(metrics.Items))
    for i, podMetrics := range metrics.Items {
        val, podTimestamp, hadMetrics := collapseTimeSamples(podMetrics, time.Minute)
        if hadMetrics {
            res[podNames[i]] = val
            if timestamp == nil || podTimestamp.Before(*timestamp) {
                timestamp = &podTimestamp
            }
        }
    }

    if timestamp == nil {
        timestamp = &time.Time{}
    }

    return res, *timestamp, nil
}

func (h *HeapsterMetricsClient) GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference) (int64, time.Time, error) {
    return 0, time.Time{}, fmt.Errorf("object metrics are not yet supported")
}

func collapseTimeSamples(metrics heapster.MetricResult, duration time.Duration) (int64, time.Time, bool) {
    floatSum := float64(0)
    intSum := int64(0)
    intSumCount := 0
    floatSumCount := 0

    var newest *heapster.MetricPoint // creation time of the newest sample for this pod
    for i, metricPoint := range metrics.Metrics {
        if newest == nil || newest.Timestamp.Before(metricPoint.Timestamp) {
            newest = &metrics.Metrics[i]
        }
    }
    if newest != nil {
        for _, metricPoint := range metrics.Metrics {
            if metricPoint.Timestamp.Add(duration).After(newest.Timestamp) {
                intSum += int64(metricPoint.Value)
                intSumCount++
                if metricPoint.FloatValue != nil {
                    floatSum += *metricPoint.FloatValue
                    floatSumCount++
                }
            }
        }

        if newest.FloatValue != nil {
            return int64(floatSum / float64(floatSumCount) * 1000), newest.Timestamp, true
        } else {
            return (intSum * 1000) / int64(intSumCount), newest.Timestamp, true
        }
    }

    return 0, time.Time{}, false
}
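collapseTimeSamples is the heart of GetRawMetric: it finds the newest sample for a pod, averages every sample that falls within the given window of that newest point, and scales the average by 1000 so it lines up with the milli-values used elsewhere in PodMetricsInfo. The following standalone sketch (simplified types, integer samples only; it is an illustration under those assumptions, not the vendored code) shows the same arithmetic:

package main

import (
    "fmt"
    "time"
)

type sample struct {
    ts    time.Time
    value int64
}

// collapse mirrors the integer path of collapseTimeSamples: find the newest
// sample, keep samples within `window` of it, average them, and scale by 1000.
func collapse(samples []sample, window time.Duration) (int64, time.Time, bool) {
    if len(samples) == 0 {
        return 0, time.Time{}, false
    }
    newest := samples[0]
    for _, s := range samples[1:] {
        if newest.ts.Before(s.ts) {
            newest = s
        }
    }
    var sum, count int64
    for _, s := range samples {
        if s.ts.Add(window).After(newest.ts) {
            sum += s.value
            count++
        }
    }
    return (sum * 1000) / count, newest.ts, true
}

func main() {
    now := time.Now()
    val, ts, ok := collapse([]sample{
        {now, 100},
        {now.Add(-30 * time.Second), 300},
        {now.Add(-2 * time.Minute), 999999}, // older than the window, ignored
    }, time.Minute)
    fmt.Println(val, ts.Equal(now), ok) // 200000 ((100+300)*1000/2), true, true
}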
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/legacy_metrics_client_test.go (365 lines added, generated, vendored, normal file)
@@ -0,0 +1,365 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
    "encoding/json"
    "fmt"
    "io"
    "testing"
    "time"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/kubernetes/fake"
    restclient "k8s.io/client-go/rest"
    core "k8s.io/client-go/testing"

    heapster "k8s.io/heapster/metrics/api/v1/types"
    metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1"

    "github.com/stretchr/testify/assert"
)

var fixedTimestamp = time.Date(2015, time.November, 10, 12, 30, 0, 0, time.UTC)

func (w fakeResponseWrapper) DoRaw() ([]byte, error) {
    return w.raw, nil
}

func (w fakeResponseWrapper) Stream() (io.ReadCloser, error) {
    return nil, nil
}

func newFakeResponseWrapper(raw []byte) fakeResponseWrapper {
    return fakeResponseWrapper{raw: raw}
}

type fakeResponseWrapper struct {
    raw []byte
}

// timestamp is used for establishing order on metricPoints
type metricPoint struct {
    level     uint64
    timestamp int
}

type testCase struct {
    desiredMetricValues PodMetricsInfo
    desiredError        error

    replicas              int
    targetTimestamp       int
    reportedMetricsPoints [][]metricPoint
    reportedPodMetrics    [][]int64

    namespace    string
    selector     labels.Selector
    resourceName v1.ResourceName
    metricName   string
}

func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
    namespace := "test-namespace"
    tc.namespace = namespace
    podNamePrefix := "test-pod"
    podLabels := map[string]string{"name": podNamePrefix}
    tc.selector = labels.SelectorFromSet(podLabels)

    // it's a resource test if we have a resource name
    isResource := len(tc.resourceName) > 0

    fakeClient := &fake.Clientset{}

    fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        obj := &v1.PodList{}
        for i := 0; i < tc.replicas; i++ {
            podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
            pod := buildPod(namespace, podName, podLabels, v1.PodRunning, "1024")
            obj.Items = append(obj.Items, pod)
        }
        return true, obj, nil
    })

    if isResource {
        fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
            metrics := metricsapi.PodMetricsList{}
            for i, containers := range tc.reportedPodMetrics {
                metric := metricsapi.PodMetrics{
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
                        Namespace: namespace,
                    },
                    Timestamp:  metav1.Time{Time: fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)},
                    Containers: []metricsapi.ContainerMetrics{},
                }
                for j, cpu := range containers {
                    cm := metricsapi.ContainerMetrics{
                        Name: fmt.Sprintf("%s-%d-container-%d", podNamePrefix, i, j),
                        Usage: v1.ResourceList{
                            v1.ResourceCPU: *resource.NewMilliQuantity(
                                cpu,
                                resource.DecimalSI),
                            v1.ResourceMemory: *resource.NewQuantity(
                                int64(1024*1024),
                                resource.BinarySI),
                        },
                    }
                    metric.Containers = append(metric.Containers, cm)
                }
                metrics.Items = append(metrics.Items, metric)
            }
            heapsterRawMemResponse, _ := json.Marshal(&metrics)
            return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
        })
    } else {
        fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
            metrics := heapster.MetricResultList{}
            var latestTimestamp time.Time
            for _, reportedMetricPoints := range tc.reportedMetricsPoints {
                var heapsterMetricPoints []heapster.MetricPoint
                for _, reportedMetricPoint := range reportedMetricPoints {
                    timestamp := fixedTimestamp.Add(time.Duration(reportedMetricPoint.timestamp) * time.Minute)
                    if latestTimestamp.Before(timestamp) {
                        latestTimestamp = timestamp
                    }
                    heapsterMetricPoint := heapster.MetricPoint{Timestamp: timestamp, Value: reportedMetricPoint.level, FloatValue: nil}
                    heapsterMetricPoints = append(heapsterMetricPoints, heapsterMetricPoint)
                }
                metric := heapster.MetricResult{
                    Metrics:         heapsterMetricPoints,
                    LatestTimestamp: latestTimestamp,
                }
                metrics.Items = append(metrics.Items, metric)
            }
            heapsterRawMemResponse, _ := json.Marshal(&metrics)
            return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
        })
    }

    return fakeClient
}

func buildPod(namespace, podName string, podLabels map[string]string, phase v1.PodPhase, request string) v1.Pod {
    return v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      podName,
            Namespace: namespace,
            Labels:    podLabels,
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Resources: v1.ResourceRequirements{
                        Requests: v1.ResourceList{
                            v1.ResourceCPU: resource.MustParse(request),
                        },
                    },
                },
            },
        },
        Status: v1.PodStatus{
            Phase: phase,
            Conditions: []v1.PodCondition{
                {
                    Type:   v1.PodReady,
                    Status: v1.ConditionTrue,
                },
            },
        },
    }
}

func (tc *testCase) verifyResults(t *testing.T, metrics PodMetricsInfo, timestamp time.Time, err error) {
    if tc.desiredError != nil {
        assert.Error(t, err, "there should be an error retrieving the metrics")
        assert.Contains(t, fmt.Sprintf("%v", err), fmt.Sprintf("%v", tc.desiredError), "the error message should be as expected")
        return
    }
    assert.NoError(t, err, "there should be no error retrieving the metrics")
    assert.NotNil(t, metrics, "there should be metrics returned")

    assert.Equal(t, tc.desiredMetricValues, metrics, "the metrics values should be as expected")

    targetTimestamp := fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)
    assert.True(t, targetTimestamp.Equal(timestamp), fmt.Sprintf("the timestamp should be as expected (%s) but was %s", targetTimestamp, timestamp))
}

func (tc *testCase) runTest(t *testing.T) {
    testClient := tc.prepareTestClient(t)
    metricsClient := NewHeapsterMetricsClient(testClient, DefaultHeapsterNamespace, DefaultHeapsterScheme, DefaultHeapsterService, DefaultHeapsterPort)
    isResource := len(tc.resourceName) > 0
    if isResource {
        info, timestamp, err := metricsClient.GetResourceMetric(tc.resourceName, tc.namespace, tc.selector)
        tc.verifyResults(t, info, timestamp, err)
    } else {
        info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector)
        tc.verifyResults(t, info, timestamp, err)
    }
}

func TestCPU(t *testing.T) {
    tc := testCase{
        replicas: 3,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
        },
        resourceName:       v1.ResourceCPU,
        targetTimestamp:    1,
        reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}},
    }
    tc.runTest(t)
}

func TestQPS(t *testing.T) {
    tc := testCase{
        replicas: 3,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 10000, "test-pod-1": 20000, "test-pod-2": 10000,
        },
        metricName:            "qps",
        targetTimestamp:       1,
        reportedMetricsPoints: [][]metricPoint{{{10, 1}}, {{20, 1}}, {{10, 1}}},
    }
    tc.runTest(t)
}

func TestQpsSumEqualZero(t *testing.T) {
    tc := testCase{
        replicas: 3,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 0, "test-pod-1": 0, "test-pod-2": 0,
        },
        metricName:            "qps",
        targetTimestamp:       0,
        reportedMetricsPoints: [][]metricPoint{{{0, 0}}, {{0, 0}}, {{0, 0}}},
    }
    tc.runTest(t)
}

func TestCPUMoreMetrics(t *testing.T) {
    tc := testCase{
        replicas: 5,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
            "test-pod-3": 5000, "test-pod-4": 5000,
        },
        resourceName:       v1.ResourceCPU,
        targetTimestamp:    10,
        reportedPodMetrics: [][]int64{{1000, 2000, 2000}, {5000}, {1000, 1000, 1000, 2000}, {4000, 1000}, {5000}},
    }
    tc.runTest(t)
}

func TestCPUMissingMetrics(t *testing.T) {
    tc := testCase{
        replicas: 3,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 4000,
        },
        resourceName:       v1.ResourceCPU,
        reportedPodMetrics: [][]int64{{4000}},
    }
    tc.runTest(t)
}

func TestQpsMissingMetrics(t *testing.T) {
    tc := testCase{
        replicas:              3,
        desiredError:          fmt.Errorf("requested metrics for 3 pods, got metrics for 1"),
        metricName:            "qps",
        targetTimestamp:       1,
        reportedMetricsPoints: [][]metricPoint{{{4000, 4}}},
    }
    tc.runTest(t)
}

func TestQpsSuperfluousMetrics(t *testing.T) {
    tc := testCase{
        replicas:              3,
        desiredError:          fmt.Errorf("requested metrics for 3 pods, got metrics for 6"),
        metricName:            "qps",
        reportedMetricsPoints: [][]metricPoint{{{1000, 1}}, {{2000, 4}}, {{2000, 1}}, {{4000, 5}}, {{2000, 1}}, {{4000, 4}}},
    }
    tc.runTest(t)
}

func TestCPUEmptyMetrics(t *testing.T) {
    tc := testCase{
        replicas:              3,
        resourceName:          v1.ResourceCPU,
        desiredError:          fmt.Errorf("no metrics returned from heapster"),
        reportedMetricsPoints: [][]metricPoint{},
        reportedPodMetrics:    [][]int64{},
    }
    tc.runTest(t)
}

func TestQpsEmptyEntries(t *testing.T) {
    tc := testCase{
        replicas:   3,
        metricName: "qps",
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 4000000, "test-pod-2": 2000000,
        },
        targetTimestamp:       4,
        reportedMetricsPoints: [][]metricPoint{{{4000, 4}}, {}, {{2000, 4}}},
    }
    tc.runTest(t)
}

func TestCPUZeroReplicas(t *testing.T) {
    tc := testCase{
        replicas:           0,
        resourceName:       v1.ResourceCPU,
        desiredError:       fmt.Errorf("no metrics returned from heapster"),
        reportedPodMetrics: [][]int64{},
    }
    tc.runTest(t)
}

func TestCPUEmptyMetricsForOnePod(t *testing.T) {
    tc := testCase{
        replicas:     3,
        resourceName: v1.ResourceCPU,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 100, "test-pod-1": 700,
        },
        reportedPodMetrics: [][]int64{{100}, {300, 400}, {}},
    }
    tc.runTest(t)
}

func testCollapseTimeSamples(t *testing.T) {
    now := time.Now()
    metrics := heapster.MetricResult{
        Metrics: []heapster.MetricPoint{
            {Timestamp: now, Value: 50, FloatValue: nil},
            {Timestamp: now.Add(-15 * time.Second), Value: 100, FloatValue: nil},
            {Timestamp: now.Add(-60 * time.Second), Value: 100000, FloatValue: nil}},
        LatestTimestamp: now,
    }

    val, timestamp, hadMetrics := collapseTimeSamples(metrics, time.Minute)
    assert.True(t, hadMetrics, "should report that it received a populated list of metrics")
    assert.InEpsilon(t, float64(75), val, 0.1, "collapsed sample value should be as expected")
    assert.True(t, timestamp.Equal(now), "timestamp should be the current time (the newest)")
}
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/rest_metrics_client.go (141 lines added, generated, vendored, normal file)
@@ -0,0 +1,141 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
    "fmt"
    "time"

    "github.com/golang/glog"

    autoscaling "k8s.io/api/autoscaling/v2beta1"
    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime/schema"
    customapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta1"
    resourceclient "k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1"
    customclient "k8s.io/metrics/pkg/client/custom_metrics"
)

func NewRESTMetricsClient(resourceClient resourceclient.PodMetricsesGetter, customClient customclient.CustomMetricsClient) MetricsClient {
    return &restMetricsClient{
        &resourceMetricsClient{resourceClient},
        &customMetricsClient{customClient},
    }
}

// restMetricsClient is a client which supports fetching
// metrics from both the resource metrics API and the
// custom metrics API.
type restMetricsClient struct {
    *resourceMetricsClient
    *customMetricsClient
}

// resourceMetricsClient implements the resource-metrics-related parts of MetricsClient,
// using data from the resource metrics API.
type resourceMetricsClient struct {
    client resourceclient.PodMetricsesGetter
}

// GetResourceMetric gets the given resource metric (and an associated oldest timestamp)
// for all pods matching the specified selector in the given namespace
func (c *resourceMetricsClient) GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
    metrics, err := c.client.PodMetricses(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("unable to fetch metrics from API: %v", err)
    }

    if len(metrics.Items) == 0 {
        return nil, time.Time{}, fmt.Errorf("no metrics returned from heapster")
    }

    res := make(PodMetricsInfo, len(metrics.Items))

    for _, m := range metrics.Items {
        podSum := int64(0)
        missing := len(m.Containers) == 0
        for _, c := range m.Containers {
            resValue, found := c.Usage[v1.ResourceName(resource)]
            if !found {
                missing = true
                glog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
                break // containers loop
            }
            podSum += resValue.MilliValue()
        }

        if !missing {
            res[m.Name] = int64(podSum)
        }
    }

    timestamp := metrics.Items[0].Timestamp.Time

    return res, timestamp, nil
}

// customMetricsClient implements the custom-metrics-related parts of MetricsClient,
// using data from the custom metrics API.
type customMetricsClient struct {
    client customclient.CustomMetricsClient
}

// GetRawMetric gets the given metric (and an associated oldest timestamp)
// for all pods matching the specified selector in the given namespace
func (c *customMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
    metrics, err := c.client.NamespacedMetrics(namespace).GetForObjects(schema.GroupKind{Kind: "Pod"}, selector, metricName)
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("unable to fetch metrics from API: %v", err)
    }

    if len(metrics.Items) == 0 {
        return nil, time.Time{}, fmt.Errorf("no metrics returned from custom metrics API")
    }

    res := make(PodMetricsInfo, len(metrics.Items))
    for _, m := range metrics.Items {
        res[m.DescribedObject.Name] = m.Value.MilliValue()
    }

    timestamp := metrics.Items[0].Timestamp.Time

    return res, timestamp, nil
}

// GetObjectMetric gets the given metric (and an associated timestamp) for the given
// object in the given namespace
func (c *customMetricsClient) GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference) (int64, time.Time, error) {
    gvk := schema.FromAPIVersionAndKind(objectRef.APIVersion, objectRef.Kind)
    var metricValue *customapi.MetricValue
    var err error
    if gvk.Kind == "Namespace" && gvk.Group == "" {
        // handle namespace separately
        // NB: we ignore namespace name here, since CrossVersionObjectReference isn't
        // supposed to allow you to escape your namespace
        metricValue, err = c.client.RootScopedMetrics().GetForObject(gvk.GroupKind(), namespace, metricName)
    } else {
        metricValue, err = c.client.NamespacedMetrics(namespace).GetForObject(gvk.GroupKind(), objectRef.Name, metricName)
    }

    if err != nil {
        return 0, time.Time{}, fmt.Errorf("unable to fetch metrics from API: %v", err)
    }

    return metricValue.Value.MilliValue(), metricValue.Timestamp.Time, nil
}
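restMetricsClient satisfies MetricsClient purely through struct embedding: resourceMetricsClient supplies GetResourceMetric while customMetricsClient supplies GetRawMetric and GetObjectMetric, and Go promotes both method sets onto the outer struct. A tiny standalone illustration of that composition pattern (generic names chosen for the sketch, nothing taken from the vendored code) follows:

package main

import "fmt"

// Full is the combined interface, analogous to MetricsClient.
type Full interface {
    Resource() string
    Custom() string
}

type resourceHalf struct{}

func (resourceHalf) Resource() string { return "from the resource metrics API" }

type customHalf struct{}

func (customHalf) Custom() string { return "from the custom metrics API" }

// combined mirrors restMetricsClient: embedding promotes both method sets,
// so the outer struct implements Full without any forwarding code.
type combined struct {
    resourceHalf
    customHalf
}

func main() {
    var c Full = combined{}
    fmt.Println(c.Resource()) // served by the embedded resourceHalf
    fmt.Println(c.Custom())   // served by the embedded customHalf
}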
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/rest_metrics_client_test.go (271 lines added, generated, vendored, normal file)
@@ -0,0 +1,271 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
    "fmt"
    "testing"
    "time"

    autoscalingapi "k8s.io/api/autoscaling/v2beta1"
    "k8s.io/api/core/v1"
    kv1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    core "k8s.io/client-go/testing"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    _ "k8s.io/kubernetes/pkg/apis/extensions/install"
    cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta1"
    metricsapi "k8s.io/metrics/pkg/apis/metrics/v1beta1"
    metricsfake "k8s.io/metrics/pkg/client/clientset_generated/clientset/fake"
    cmfake "k8s.io/metrics/pkg/client/custom_metrics/fake"

    "github.com/stretchr/testify/assert"
)

type restClientTestCase struct {
    desiredMetricValues PodMetricsInfo
    desiredError        error

    // "timestamps" here are actually the offset in minutes from a base timestamp
    targetTimestamp      int
    reportedMetricPoints []metricPoint
    reportedPodMetrics   [][]int64
    singleObject         *autoscalingapi.CrossVersionObjectReference

    namespace    string
    selector     labels.Selector
    resourceName v1.ResourceName
    metricName   string
}

func (tc *restClientTestCase) prepareTestClient(t *testing.T) (*metricsfake.Clientset, *cmfake.FakeCustomMetricsClient) {
    namespace := "test-namespace"
    tc.namespace = namespace
    podNamePrefix := "test-pod"
    podLabels := map[string]string{"name": podNamePrefix}
    tc.selector = labels.SelectorFromSet(podLabels)

    // it's a resource test if we have a resource name
    isResource := len(tc.resourceName) > 0

    fakeMetricsClient := &metricsfake.Clientset{}
    fakeCMClient := &cmfake.FakeCustomMetricsClient{}

    if isResource {
        fakeMetricsClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
            metrics := &metricsapi.PodMetricsList{}
            for i, containers := range tc.reportedPodMetrics {
                metric := metricsapi.PodMetrics{
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
                        Namespace: namespace,
                        Labels:    podLabels,
                    },
                    Timestamp:  metav1.Time{Time: fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)},
                    Containers: []metricsapi.ContainerMetrics{},
                }
                for j, cpu := range containers {
                    cm := metricsapi.ContainerMetrics{
                        Name: fmt.Sprintf("%s-%d-container-%d", podNamePrefix, i, j),
                        Usage: v1.ResourceList{
                            v1.ResourceCPU: *resource.NewMilliQuantity(
                                cpu,
                                resource.DecimalSI),
                            v1.ResourceMemory: *resource.NewQuantity(
                                int64(1024*1024),
                                resource.BinarySI),
                        },
                    }
                    metric.Containers = append(metric.Containers, cm)
                }
                metrics.Items = append(metrics.Items, metric)
            }
            return true, metrics, nil
        })
    } else {
        fakeCMClient.AddReactor("get", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
            getForAction := action.(cmfake.GetForAction)
            assert.Equal(t, tc.metricName, getForAction.GetMetricName(), "the metric requested should have matched the one specified")

            if getForAction.GetName() == "*" {
                // multiple objects
                metrics := cmapi.MetricValueList{}
                assert.Equal(t, "pods", getForAction.GetResource().Resource, "type of object that we requested multiple metrics for should have been pods")

                for i, metricPoint := range tc.reportedMetricPoints {
                    timestamp := fixedTimestamp.Add(time.Duration(metricPoint.timestamp) * time.Minute)
                    metric := cmapi.MetricValue{
                        DescribedObject: v1.ObjectReference{
                            Kind:       "Pod",
                            APIVersion: "v1",
                            Name:       fmt.Sprintf("%s-%d", podNamePrefix, i),
                        },
                        Value:      *resource.NewMilliQuantity(int64(metricPoint.level), resource.DecimalSI),
                        Timestamp:  metav1.Time{Time: timestamp},
                        MetricName: tc.metricName,
                    }

                    metrics.Items = append(metrics.Items, metric)
                }

                return true, &metrics, nil
            } else {
                name := getForAction.GetName()
                mapper := legacyscheme.Registry.RESTMapper()
                assert.NotNil(t, tc.singleObject, "should have only requested a single-object metric when we asked for metrics for a single object")
                gk := schema.FromAPIVersionAndKind(tc.singleObject.APIVersion, tc.singleObject.Kind).GroupKind()
                mapping, err := mapper.RESTMapping(gk)
                if err != nil {
                    return true, nil, fmt.Errorf("unable to get mapping for %s: %v", gk.String(), err)
                }
                groupResource := schema.GroupResource{Group: mapping.GroupVersionKind.Group, Resource: mapping.Resource}

                assert.Equal(t, groupResource.String(), getForAction.GetResource().Resource, "should have requested metrics for the resource matching the GroupKind passed in")
                assert.Equal(t, tc.singleObject.Name, name, "should have requested metrics for the object matching the name passed in")
                metricPoint := tc.reportedMetricPoints[0]
                timestamp := fixedTimestamp.Add(time.Duration(metricPoint.timestamp) * time.Minute)

                metrics := &cmapi.MetricValueList{
                    Items: []cmapi.MetricValue{
                        {
                            DescribedObject: v1.ObjectReference{
                                Kind:       tc.singleObject.Kind,
                                APIVersion: tc.singleObject.APIVersion,
                                Name:       tc.singleObject.Name,
                            },
                            Timestamp:  metav1.Time{Time: timestamp},
                            MetricName: tc.metricName,
                            Value:      *resource.NewMilliQuantity(int64(metricPoint.level), resource.DecimalSI),
                        },
                    },
                }

                return true, metrics, nil
            }
        })
    }

    return fakeMetricsClient, fakeCMClient
}

func (tc *restClientTestCase) verifyResults(t *testing.T, metrics PodMetricsInfo, timestamp time.Time, err error) {
    if tc.desiredError != nil {
        assert.Error(t, err, "there should be an error retrieving the metrics")
        assert.Contains(t, fmt.Sprintf("%v", err), fmt.Sprintf("%v", tc.desiredError), "the error message should be as expected")
        return
    }
    assert.NoError(t, err, "there should be no error retrieving the metrics")
    assert.NotNil(t, metrics, "there should be metrics returned")

    assert.Equal(t, tc.desiredMetricValues, metrics, "the metrics values should be as expected")

    targetTimestamp := fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)
    assert.True(t, targetTimestamp.Equal(timestamp), fmt.Sprintf("the timestamp should be as expected (%s) but was %s", targetTimestamp, timestamp))
}

func (tc *restClientTestCase) runTest(t *testing.T) {
    testMetricsClient, testCMClient := tc.prepareTestClient(t)
    metricsClient := NewRESTMetricsClient(testMetricsClient.MetricsV1beta1(), testCMClient)
    isResource := len(tc.resourceName) > 0
    if isResource {
        info, timestamp, err := metricsClient.GetResourceMetric(kv1.ResourceName(tc.resourceName), tc.namespace, tc.selector)
        tc.verifyResults(t, info, timestamp, err)
    } else if tc.singleObject == nil {
        info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector)
        tc.verifyResults(t, info, timestamp, err)
    } else {
        val, timestamp, err := metricsClient.GetObjectMetric(tc.metricName, tc.namespace, tc.singleObject)
        info := PodMetricsInfo{tc.singleObject.Name: val}
        tc.verifyResults(t, info, timestamp, err)
    }
}

func TestRESTClientCPU(t *testing.T) {
    tc := restClientTestCase{
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
        },
        resourceName:       v1.ResourceCPU,
        targetTimestamp:    1,
        reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}},
    }
    tc.runTest(t)
}

func TestRESTClientQPS(t *testing.T) {
    tc := restClientTestCase{
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 10000, "test-pod-1": 20000, "test-pod-2": 10000,
        },
        metricName:           "qps",
        targetTimestamp:      1,
        reportedMetricPoints: []metricPoint{{10000, 1}, {20000, 1}, {10000, 1}},
    }
    tc.runTest(t)
}

func TestRESTClientSingleObject(t *testing.T) {
    tc := restClientTestCase{
        desiredMetricValues:  PodMetricsInfo{"some-dep": 10},
        metricName:           "queue-length",
        targetTimestamp:      1,
        reportedMetricPoints: []metricPoint{{10, 1}},
        singleObject: &autoscalingapi.CrossVersionObjectReference{
            APIVersion: "extensions/v1beta1",
            Kind:       "Deployment",
            Name:       "some-dep",
        },
    }
    tc.runTest(t)
}

func TestRESTClientQpsSumEqualZero(t *testing.T) {
    tc := restClientTestCase{
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 0, "test-pod-1": 0, "test-pod-2": 0,
        },
        metricName:           "qps",
        targetTimestamp:      0,
        reportedMetricPoints: []metricPoint{{0, 0}, {0, 0}, {0, 0}},
    }
    tc.runTest(t)
}

func TestRESTClientCPUEmptyMetrics(t *testing.T) {
    tc := restClientTestCase{
        resourceName:         v1.ResourceCPU,
        desiredError:         fmt.Errorf("no metrics returned from heapster"),
        reportedMetricPoints: []metricPoint{},
        reportedPodMetrics:   [][]int64{},
    }
    tc.runTest(t)
}

func TestRESTClientCPUEmptyMetricsForOnePod(t *testing.T) {
    tc := restClientTestCase{
        resourceName: v1.ResourceCPU,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": 100, "test-pod-1": 700,
        },
        reportedPodMetrics: [][]int64{{100}, {300, 400}, {}},
    }
    tc.runTest(t)
}
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/utilization.go (66 lines added, generated, vendored, normal file)
@@ -0,0 +1,66 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
    "fmt"
)

// GetResourceUtilizationRatio takes in a set of metrics, a set of matching requests,
// and a target utilization percentage, and calculates the ratio of
// desired to actual utilization (returning that, the actual utilization, and the raw average value)
func GetResourceUtilizationRatio(metrics PodMetricsInfo, requests map[string]int64, targetUtilization int32) (utilizationRatio float64, currentUtilization int32, rawAverageValue int64, err error) {
    metricsTotal := int64(0)
    requestsTotal := int64(0)
    numEntries := 0

    for podName, metricValue := range metrics {
        request, hasRequest := requests[podName]
        if !hasRequest {
            // we check for missing requests elsewhere, so assuming missing requests == extraneous metrics
            continue
        }

        metricsTotal += metricValue
        requestsTotal += request
        numEntries++
    }

    // if the set of requests is completely disjoint from the set of metrics,
    // then we could have an issue where the requests total is zero
    if requestsTotal == 0 {
        return 0, 0, 0, fmt.Errorf("no metrics returned matched known pods")
    }

    currentUtilization = int32((metricsTotal * 100) / requestsTotal)

    return float64(currentUtilization) / float64(targetUtilization), currentUtilization, metricsTotal / int64(numEntries), nil
}

// GetMetricUtilizationRatio takes in a set of metrics and a target utilization value,
// and calculates the ratio of desired to actual utilization
// (returning that and the actual utilization)
func GetMetricUtilizationRatio(metrics PodMetricsInfo, targetUtilization int64) (utilizationRatio float64, currentUtilization int64) {
    metricsTotal := int64(0)
    for _, metricValue := range metrics {
        metricsTotal += metricValue
    }

    currentUtilization = metricsTotal / int64(len(metrics))

    return float64(currentUtilization) / float64(targetUtilization), currentUtilization
}
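GetResourceUtilizationRatio is plain arithmetic over milli-values: it sums usage and requests for pods present in both maps, derives the current utilization percentage, and divides by the target. A short worked example (a sketch; it assumes the vendored import path is resolvable from a scratch program) with two pods using 300m and 500m of CPU against requests of 400m and 600m:

package main

import (
    "fmt"
    "log"

    metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
)

func main() {
    usage := metricsclient.PodMetricsInfo{"pod-a": 300, "pod-b": 500} // usage in milli-values
    requests := map[string]int64{"pod-a": 400, "pod-b": 600}          // requests in milli-values

    // usage total 800m vs. request total 1000m -> 80% current utilization;
    // against a 100% target that is a ratio of 0.8, with a raw average of 400m.
    ratio, current, rawAvg, err := metricsclient.GetResourceUtilizationRatio(usage, requests, 100)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(ratio, current, rawAvg) // 0.8 80 400
}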