ceph-csi (mirror of https://github.com/ceph/ceph-csi.git)
Commit: Fresh dep ensure
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/BUILD (new file, generated, vendored, 72 lines)
@@ -0,0 +1,72 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "top.go",
        "top_node.go",
        "top_pod.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/top",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/kubectl/cmd/util:go_default_library",
        "//pkg/kubectl/metricsutil:go_default_library",
        "//pkg/kubectl/util/i18n:go_default_library",
        "//pkg/kubectl/util/templates:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library",
        "//staging/src/k8s.io/client-go/discovery:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/apis/metrics:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/client/clientset/versioned:go_default_library",
        "//vendor/github.com/spf13/cobra:go_default_library",
        "//vendor/github.com/spf13/pflag:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "top_node_test.go",
        "top_pod_test.go",
        "top_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/kubectl/cmd/testing:go_default_library",
        "//pkg/kubectl/cmd/util:go_default_library",
        "//pkg/kubectl/scheme:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/version:go_default_library",
        "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/rest/fake:go_default_library",
        "//staging/src/k8s.io/client-go/testing:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/fake:go_default_library",
        "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/top.go (new file, generated, vendored, 71 lines)
@@ -0,0 +1,71 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package top

import (
	"github.com/spf13/cobra"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
	"k8s.io/kubernetes/pkg/kubectl/util/i18n"
	"k8s.io/kubernetes/pkg/kubectl/util/templates"
	metricsapi "k8s.io/metrics/pkg/apis/metrics"
)

var (
	supportedMetricsAPIVersions = []string{
		"v1beta1",
	}
	topLong = templates.LongDesc(i18n.T(`
		Display Resource (CPU/Memory/Storage) usage.

		The top command allows you to see the resource consumption for nodes or pods.

		This command requires Heapster to be correctly configured and working on the server. `))
)

func NewCmdTop(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "top",
		Short: i18n.T("Display Resource (CPU/Memory/Storage) usage."),
		Long:  topLong,
		Run:   cmdutil.DefaultSubCommandRun(streams.ErrOut),
	}

	// create subcommands
	cmd.AddCommand(NewCmdTopNode(f, nil, streams))
	cmd.AddCommand(NewCmdTopPod(f, nil, streams))

	return cmd
}

func SupportedMetricsAPIVersionAvailable(discoveredAPIGroups *metav1.APIGroupList) bool {
	for _, discoveredAPIGroup := range discoveredAPIGroups.Groups {
		if discoveredAPIGroup.Name != metricsapi.GroupName {
			continue
		}
		for _, version := range discoveredAPIGroup.Versions {
			for _, supportedVersion := range supportedMetricsAPIVersions {
				if version.Version == supportedVersion {
					return true
				}
			}
		}
	}
	return false
}
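The split between the metrics.k8s.io path and the legacy Heapster path hinges on SupportedMetricsAPIVersionAvailable above, which scans the discovered API groups for metrics.k8s.io at a supported version. A minimal standalone sketch of calling it with a hand-built discovery result (the group list literal is illustrative, not taken from this commit):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/kubectl/cmd/top"
)

func main() {
	// Discovery output as it would look with metrics-server installed:
	// the metrics.k8s.io group served at v1beta1.
	groups := &metav1.APIGroupList{
		Groups: []metav1.APIGroup{{
			Name: "metrics.k8s.io",
			Versions: []metav1.GroupVersionForDiscovery{
				{GroupVersion: "metrics.k8s.io/v1beta1", Version: "v1beta1"},
			},
		}},
	}
	// Prints true, so RunTopNode/RunTopPod would use the metrics API
	// instead of falling back to Heapster.
	fmt.Println(top.SupportedMetricsAPIVersionAvailable(groups))
}
```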
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/top_node.go (new file, generated, vendored, 242 lines)
@@ -0,0 +1,242 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package top

import (
	"errors"

	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/client-go/discovery"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
	"k8s.io/kubernetes/pkg/kubectl/metricsutil"
	"k8s.io/kubernetes/pkg/kubectl/util/i18n"
	"k8s.io/kubernetes/pkg/kubectl/util/templates"
	metricsapi "k8s.io/metrics/pkg/apis/metrics"
	metricsV1beta1api "k8s.io/metrics/pkg/apis/metrics/v1beta1"
	metricsclientset "k8s.io/metrics/pkg/client/clientset/versioned"
)

// TopNodeOptions contains all the options for running the top-node cli command.
type TopNodeOptions struct {
	ResourceName    string
	Selector        string
	NoHeaders       bool
	NodeClient      corev1client.CoreV1Interface
	HeapsterOptions HeapsterTopOptions
	Client          *metricsutil.HeapsterMetricsClient
	Printer         *metricsutil.TopCmdPrinter
	DiscoveryClient discovery.DiscoveryInterface
	MetricsClient   metricsclientset.Interface

	genericclioptions.IOStreams
}

type HeapsterTopOptions struct {
	Namespace string
	Service   string
	Scheme    string
	Port      string
}

func (o *HeapsterTopOptions) Bind(flags *pflag.FlagSet) {
	if len(o.Namespace) == 0 {
		o.Namespace = metricsutil.DefaultHeapsterNamespace
	}
	if len(o.Service) == 0 {
		o.Service = metricsutil.DefaultHeapsterService
	}
	if len(o.Scheme) == 0 {
		o.Scheme = metricsutil.DefaultHeapsterScheme
	}
	if len(o.Port) == 0 {
		o.Port = metricsutil.DefaultHeapsterPort
	}

	flags.StringVar(&o.Namespace, "heapster-namespace", o.Namespace, "Namespace Heapster service is located in")
	flags.StringVar(&o.Service, "heapster-service", o.Service, "Name of Heapster service")
	flags.StringVar(&o.Scheme, "heapster-scheme", o.Scheme, "Scheme (http or https) to connect to Heapster as")
	flags.StringVar(&o.Port, "heapster-port", o.Port, "Port name in service to use")
}

var (
	topNodeLong = templates.LongDesc(i18n.T(`
		Display Resource (CPU/Memory/Storage) usage of nodes.

		The top-node command allows you to see the resource consumption of nodes.`))

	topNodeExample = templates.Examples(i18n.T(`
		# Show metrics for all nodes
		kubectl top node

		# Show metrics for a given node
		kubectl top node NODE_NAME`))
)

func NewCmdTopNode(f cmdutil.Factory, o *TopNodeOptions, streams genericclioptions.IOStreams) *cobra.Command {
	if o == nil {
		o = &TopNodeOptions{
			IOStreams: streams,
		}
	}

	cmd := &cobra.Command{
		Use:                   "node [NAME | -l label]",
		DisableFlagsInUseLine: true,
		Short:                 i18n.T("Display Resource (CPU/Memory/Storage) usage of nodes"),
		Long:                  topNodeLong,
		Example:               topNodeExample,
		Run: func(cmd *cobra.Command, args []string) {
			cmdutil.CheckErr(o.Complete(f, cmd, args))
			cmdutil.CheckErr(o.Validate())
			cmdutil.CheckErr(o.RunTopNode())
		},
		Aliases: []string{"nodes", "no"},
	}
	cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
	cmd.Flags().BoolVar(&o.NoHeaders, "no-headers", o.NoHeaders, "If present, print output without headers")

	o.HeapsterOptions.Bind(cmd.Flags())
	return cmd
}

func (o *TopNodeOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error {
	if len(args) == 1 {
		o.ResourceName = args[0]
	} else if len(args) > 1 {
		return cmdutil.UsageErrorf(cmd, "%s", cmd.Use)
	}

	clientset, err := f.KubernetesClientSet()
	if err != nil {
		return err
	}

	o.DiscoveryClient = clientset.DiscoveryClient

	config, err := f.ToRESTConfig()
	if err != nil {
		return err
	}
	o.MetricsClient, err = metricsclientset.NewForConfig(config)
	if err != nil {
		return err
	}

	o.NodeClient = clientset.CoreV1()
	o.Client = metricsutil.NewHeapsterMetricsClient(clientset.CoreV1(), o.HeapsterOptions.Namespace, o.HeapsterOptions.Scheme, o.HeapsterOptions.Service, o.HeapsterOptions.Port)

	o.Printer = metricsutil.NewTopCmdPrinter(o.Out)
	return nil
}

func (o *TopNodeOptions) Validate() error {
	if len(o.ResourceName) > 0 && len(o.Selector) > 0 {
		return errors.New("only one of NAME or --selector can be provided")
	}
	return nil
}

func (o TopNodeOptions) RunTopNode() error {
	var err error
	selector := labels.Everything()
	if len(o.Selector) > 0 {
		selector, err = labels.Parse(o.Selector)
		if err != nil {
			return err
		}
	}

	apiGroups, err := o.DiscoveryClient.ServerGroups()
	if err != nil {
		return err
	}

	metricsAPIAvailable := SupportedMetricsAPIVersionAvailable(apiGroups)

	metrics := &metricsapi.NodeMetricsList{}
	if metricsAPIAvailable {
		metrics, err = getNodeMetricsFromMetricsAPI(o.MetricsClient, o.ResourceName, selector)
		if err != nil {
			return err
		}
	} else {
		metrics, err = o.Client.GetNodeMetrics(o.ResourceName, selector.String())
		if err != nil {
			return err
		}
	}

	if len(metrics.Items) == 0 {
		return errors.New("metrics not available yet")
	}

	var nodes []v1.Node
	if len(o.ResourceName) > 0 {
		node, err := o.NodeClient.Nodes().Get(o.ResourceName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		nodes = append(nodes, *node)
	} else {
		nodeList, err := o.NodeClient.Nodes().List(metav1.ListOptions{
			LabelSelector: selector.String(),
		})
		if err != nil {
			return err
		}
		nodes = append(nodes, nodeList.Items...)
	}

	allocatable := make(map[string]v1.ResourceList)

	for _, n := range nodes {
		allocatable[n.Name] = n.Status.Allocatable
	}

	return o.Printer.PrintNodeMetrics(metrics.Items, allocatable, o.NoHeaders)
}

func getNodeMetricsFromMetricsAPI(metricsClient metricsclientset.Interface, resourceName string, selector labels.Selector) (*metricsapi.NodeMetricsList, error) {
	var err error
	versionedMetrics := &metricsV1beta1api.NodeMetricsList{}
	mc := metricsClient.Metrics()
	nm := mc.NodeMetricses()
	if resourceName != "" {
		m, err := nm.Get(resourceName, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		versionedMetrics.Items = []metricsV1beta1api.NodeMetrics{*m}
	} else {
		versionedMetrics, err = nm.List(metav1.ListOptions{LabelSelector: selector.String()})
		if err != nil {
			return nil, err
		}
	}
	metrics := &metricsapi.NodeMetricsList{}
	err = metricsV1beta1api.Convert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(versionedMetrics, metrics, nil)
	if err != nil {
		return nil, err
	}
	return metrics, nil
}
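RunTopNode above resolves node metrics either through the metrics API client or through Heapster, then joins them with each node's allocatable resources before printing. A minimal sketch of the metrics-API leg on its own, using the same clientset calls as getNodeMetricsFromMetricsAPI (assumes a reachable cluster running metrics-server and a kubeconfig at the default path):

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
	metricsclientset "k8s.io/metrics/pkg/client/clientset/versioned"
)

func main() {
	// Assumed kubeconfig location; adjust for your environment.
	kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		log.Fatal(err)
	}

	// Same clientset and list call that the vendored code issues
	// when the metrics.k8s.io API is available.
	client, err := metricsclientset.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}
	nodeMetrics, err := client.Metrics().NodeMetricses().List(metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range nodeMetrics.Items {
		fmt.Printf("%s\tcpu=%s\tmemory=%s\n", m.Name, m.Usage.Cpu(), m.Usage.Memory())
	}
}
```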
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/top_node_test.go (new file, generated, vendored, 496 lines)
@@ -0,0 +1,496 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package top
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"net/url"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/cli-runtime/pkg/genericclioptions"
|
||||
"k8s.io/client-go/rest/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing"
|
||||
"k8s.io/kubernetes/pkg/kubectl/scheme"
|
||||
metricsv1alpha1api "k8s.io/metrics/pkg/apis/metrics/v1alpha1"
|
||||
metricsv1beta1api "k8s.io/metrics/pkg/apis/metrics/v1beta1"
|
||||
metricsfake "k8s.io/metrics/pkg/client/clientset/versioned/fake"
|
||||
)
|
||||
|
||||
const (
|
||||
apiPrefix = "api"
|
||||
apiVersion = "v1"
|
||||
)
|
||||
|
||||
func TestTopNodeAllMetrics(t *testing.T) {
|
||||
cmdtesting.InitTestErrorHandler(t)
|
||||
metrics, nodes := testNodeV1alpha1MetricsData()
|
||||
expectedMetricsPath := fmt.Sprintf("%s/%s/nodes", baseMetricsAddress, metricsApiVersion)
|
||||
expectedNodePath := fmt.Sprintf("/%s/%s/nodes", apiPrefix, apiVersion)
|
||||
|
||||
tf := cmdtesting.NewTestFactory().WithNamespace("test")
|
||||
defer tf.Cleanup()
|
||||
|
||||
codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...)
|
||||
ns := scheme.Codecs
|
||||
|
||||
tf.Client = &fake.RESTClient{
|
||||
NegotiatedSerializer: ns,
|
||||
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
|
||||
switch p, m := req.URL.Path, req.Method; {
|
||||
case p == "/api":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apibody)))}, nil
|
||||
case p == "/apis":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apisbody)))}, nil
|
||||
case p == expectedMetricsPath && m == "GET":
|
||||
body, err := marshallBody(metrics)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: body}, nil
|
||||
case p == expectedNodePath && m == "GET":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, nodes)}, nil
|
||||
default:
|
||||
t.Fatalf("unexpected request: %#v\nGot URL: %#v\nExpected path: %#v", req, req.URL, expectedMetricsPath)
|
||||
return nil, nil
|
||||
}
|
||||
}),
|
||||
}
|
||||
tf.ClientConfigVal = cmdtesting.DefaultClientConfig()
|
||||
streams, _, buf, _ := genericclioptions.NewTestIOStreams()
|
||||
|
||||
cmd := NewCmdTopNode(tf, nil, streams)
|
||||
cmd.Flags().Set("no-headers", "true")
|
||||
cmd.Run(cmd, []string{})
|
||||
|
||||
// Check the presence of node names in the output.
|
||||
result := buf.String()
|
||||
for _, m := range metrics.Items {
|
||||
if !strings.Contains(result, m.Name) {
|
||||
t.Errorf("missing metrics for %s: \n%s", m.Name, result)
|
||||
}
|
||||
}
|
||||
if strings.Contains(result, "MEMORY") {
|
||||
t.Errorf("should not print headers with --no-headers option set:\n%s\n", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTopNodeAllMetricsCustomDefaults(t *testing.T) {
|
||||
customBaseHeapsterServiceAddress := "/api/v1/namespaces/custom-namespace/services/https:custom-heapster-service:/proxy"
|
||||
customBaseMetricsAddress := customBaseHeapsterServiceAddress + "/apis/metrics"
|
||||
|
||||
cmdtesting.InitTestErrorHandler(t)
|
||||
metrics, nodes := testNodeV1alpha1MetricsData()
|
||||
expectedMetricsPath := fmt.Sprintf("%s/%s/nodes", customBaseMetricsAddress, metricsApiVersion)
|
||||
expectedNodePath := fmt.Sprintf("/%s/%s/nodes", apiPrefix, apiVersion)
|
||||
|
||||
tf := cmdtesting.NewTestFactory().WithNamespace("test")
|
||||
defer tf.Cleanup()
|
||||
|
||||
codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...)
|
||||
ns := scheme.Codecs
|
||||
|
||||
tf.Client = &fake.RESTClient{
|
||||
NegotiatedSerializer: ns,
|
||||
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
|
||||
switch p, m := req.URL.Path, req.Method; {
|
||||
case p == "/api":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apibody)))}, nil
|
||||
case p == "/apis":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apisbody)))}, nil
|
||||
case p == expectedMetricsPath && m == "GET":
|
||||
body, err := marshallBody(metrics)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: body}, nil
|
||||
case p == expectedNodePath && m == "GET":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, nodes)}, nil
|
||||
default:
|
||||
t.Fatalf("unexpected request: %#v\nGot URL: %#v\nExpected path: %#v", req, req.URL, expectedMetricsPath)
|
||||
return nil, nil
|
||||
}
|
||||
}),
|
||||
}
|
||||
tf.ClientConfigVal = cmdtesting.DefaultClientConfig()
|
||||
streams, _, buf, _ := genericclioptions.NewTestIOStreams()
|
||||
|
||||
opts := &TopNodeOptions{
|
||||
HeapsterOptions: HeapsterTopOptions{
|
||||
Namespace: "custom-namespace",
|
||||
Scheme: "https",
|
||||
Service: "custom-heapster-service",
|
||||
},
|
||||
IOStreams: streams,
|
||||
}
|
||||
cmd := NewCmdTopNode(tf, opts, streams)
|
||||
cmd.Run(cmd, []string{})
|
||||
|
||||
// Check the presence of node names in the output.
|
||||
result := buf.String()
|
||||
for _, m := range metrics.Items {
|
||||
if !strings.Contains(result, m.Name) {
|
||||
t.Errorf("missing metrics for %s: \n%s", m.Name, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTopNodeWithNameMetrics(t *testing.T) {
|
||||
cmdtesting.InitTestErrorHandler(t)
|
||||
metrics, nodes := testNodeV1alpha1MetricsData()
|
||||
expectedMetrics := metrics.Items[0]
|
||||
expectedNode := nodes.Items[0]
|
||||
nonExpectedMetrics := metricsv1alpha1api.NodeMetricsList{
|
||||
ListMeta: metrics.ListMeta,
|
||||
Items: metrics.Items[1:],
|
||||
}
|
||||
expectedPath := fmt.Sprintf("%s/%s/nodes/%s", baseMetricsAddress, metricsApiVersion, expectedMetrics.Name)
|
||||
expectedNodePath := fmt.Sprintf("/%s/%s/nodes/%s", apiPrefix, apiVersion, expectedMetrics.Name)
|
||||
|
||||
tf := cmdtesting.NewTestFactory().WithNamespace("test")
|
||||
defer tf.Cleanup()
|
||||
|
||||
codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...)
|
||||
ns := scheme.Codecs
|
||||
|
||||
tf.Client = &fake.RESTClient{
|
||||
NegotiatedSerializer: ns,
|
||||
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
|
||||
switch p, m := req.URL.Path, req.Method; {
|
||||
case p == "/api":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apibody)))}, nil
|
||||
case p == "/apis":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apisbody)))}, nil
|
||||
case p == expectedPath && m == "GET":
|
||||
body, err := marshallBody(expectedMetrics)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: body}, nil
|
||||
case p == expectedNodePath && m == "GET":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, &expectedNode)}, nil
|
||||
default:
|
||||
t.Fatalf("unexpected request: %#v\nGot URL: %#v\nExpected path: %#v", req, req.URL, expectedPath)
|
||||
return nil, nil
|
||||
}
|
||||
}),
|
||||
}
|
||||
tf.ClientConfigVal = cmdtesting.DefaultClientConfig()
|
||||
streams, _, buf, _ := genericclioptions.NewTestIOStreams()
|
||||
|
||||
cmd := NewCmdTopNode(tf, nil, streams)
|
||||
cmd.Run(cmd, []string{expectedMetrics.Name})
|
||||
|
||||
// Check the presence of node names in the output.
|
||||
result := buf.String()
|
||||
if !strings.Contains(result, expectedMetrics.Name) {
|
||||
t.Errorf("missing metrics for %s: \n%s", expectedMetrics.Name, result)
|
||||
}
|
||||
for _, m := range nonExpectedMetrics.Items {
|
||||
if strings.Contains(result, m.Name) {
|
||||
t.Errorf("unexpected metrics for %s: \n%s", m.Name, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTopNodeWithLabelSelectorMetrics(t *testing.T) {
|
||||
cmdtesting.InitTestErrorHandler(t)
|
||||
metrics, nodes := testNodeV1alpha1MetricsData()
|
||||
expectedMetrics := metricsv1alpha1api.NodeMetricsList{
|
||||
ListMeta: metrics.ListMeta,
|
||||
Items: metrics.Items[0:1],
|
||||
}
|
||||
expectedNodes := v1.NodeList{
|
||||
ListMeta: nodes.ListMeta,
|
||||
Items: nodes.Items[0:1],
|
||||
}
|
||||
nonExpectedMetrics := metricsv1alpha1api.NodeMetricsList{
|
||||
ListMeta: metrics.ListMeta,
|
||||
Items: metrics.Items[1:],
|
||||
}
|
||||
label := "key=value"
|
||||
expectedPath := fmt.Sprintf("%s/%s/nodes", baseMetricsAddress, metricsApiVersion)
|
||||
expectedQuery := fmt.Sprintf("labelSelector=%s", url.QueryEscape(label))
|
||||
expectedNodePath := fmt.Sprintf("/%s/%s/nodes", apiPrefix, apiVersion)
|
||||
|
||||
tf := cmdtesting.NewTestFactory().WithNamespace("test")
|
||||
defer tf.Cleanup()
|
||||
|
||||
codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...)
|
||||
ns := scheme.Codecs
|
||||
|
||||
tf.Client = &fake.RESTClient{
|
||||
NegotiatedSerializer: ns,
|
||||
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
|
||||
switch p, m, q := req.URL.Path, req.Method, req.URL.RawQuery; {
|
||||
case p == "/api":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apibody)))}, nil
|
||||
case p == "/apis":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apisbody)))}, nil
|
||||
case p == expectedPath && m == "GET" && q == expectedQuery:
|
||||
body, err := marshallBody(expectedMetrics)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: body}, nil
|
||||
case p == expectedNodePath && m == "GET":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, &expectedNodes)}, nil
|
||||
default:
|
||||
t.Fatalf("unexpected request: %#v\nGot URL: %#v\nExpected path: %#v", req, req.URL, expectedPath)
|
||||
return nil, nil
|
||||
}
|
||||
}),
|
||||
}
|
||||
tf.ClientConfigVal = cmdtesting.DefaultClientConfig()
|
||||
streams, _, buf, _ := genericclioptions.NewTestIOStreams()
|
||||
|
||||
cmd := NewCmdTopNode(tf, nil, streams)
|
||||
cmd.Flags().Set("selector", label)
|
||||
cmd.Run(cmd, []string{})
|
||||
|
||||
// Check the presence of node names in the output.
|
||||
result := buf.String()
|
||||
for _, m := range expectedMetrics.Items {
|
||||
if !strings.Contains(result, m.Name) {
|
||||
t.Errorf("missing metrics for %s: \n%s", m.Name, result)
|
||||
}
|
||||
}
|
||||
for _, m := range nonExpectedMetrics.Items {
|
||||
if strings.Contains(result, m.Name) {
|
||||
t.Errorf("unexpected metrics for %s: \n%s", m.Name, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTopNodeAllMetricsFromMetricsServer(t *testing.T) {
|
||||
cmdtesting.InitTestErrorHandler(t)
|
||||
expectedMetrics, nodes := testNodeV1beta1MetricsData()
|
||||
expectedNodePath := fmt.Sprintf("/%s/%s/nodes", apiPrefix, apiVersion)
|
||||
|
||||
tf := cmdtesting.NewTestFactory().WithNamespace("test")
|
||||
defer tf.Cleanup()
|
||||
|
||||
codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...)
|
||||
ns := scheme.Codecs
|
||||
|
||||
tf.Client = &fake.RESTClient{
|
||||
NegotiatedSerializer: ns,
|
||||
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
|
||||
switch p, m := req.URL.Path, req.Method; {
|
||||
case p == "/api":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apibody)))}, nil
|
||||
case p == "/apis":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apisbodyWithMetrics)))}, nil
|
||||
case p == expectedNodePath && m == "GET":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, nodes)}, nil
|
||||
default:
|
||||
t.Fatalf("unexpected request: %#v\nGot URL: %#v\n", req, req.URL)
|
||||
return nil, nil
|
||||
}
|
||||
}),
|
||||
}
|
||||
fakemetricsClientset := &metricsfake.Clientset{}
|
||||
fakemetricsClientset.AddReactor("list", "nodes", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
return true, expectedMetrics, nil
|
||||
})
|
||||
tf.ClientConfigVal = cmdtesting.DefaultClientConfig()
|
||||
streams, _, buf, _ := genericclioptions.NewTestIOStreams()
|
||||
|
||||
cmd := NewCmdTopNode(tf, nil, streams)
|
||||
|
||||
// TODO in the long run, we want to test most of our commands like this. Wire the options struct with specific mocks
|
||||
// TODO then check the particular Run functionality and harvest results from fake clients
|
||||
cmdOptions := &TopNodeOptions{
|
||||
IOStreams: streams,
|
||||
}
|
||||
if err := cmdOptions.Complete(tf, cmd, []string{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cmdOptions.MetricsClient = fakemetricsClientset
|
||||
if err := cmdOptions.Validate(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := cmdOptions.RunTopNode(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check the presence of node names in the output.
|
||||
result := buf.String()
|
||||
for _, m := range expectedMetrics.Items {
|
||||
if !strings.Contains(result, m.Name) {
|
||||
t.Errorf("missing metrics for %s: \n%s", m.Name, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTopNodeWithNameMetricsFromMetricsServer(t *testing.T) {
|
||||
cmdtesting.InitTestErrorHandler(t)
|
||||
metrics, nodes := testNodeV1beta1MetricsData()
|
||||
expectedMetrics := metrics.Items[0]
|
||||
expectedNode := nodes.Items[0]
|
||||
nonExpectedMetrics := metricsv1beta1api.NodeMetricsList{
|
||||
ListMeta: metrics.ListMeta,
|
||||
Items: metrics.Items[1:],
|
||||
}
|
||||
expectedNodePath := fmt.Sprintf("/%s/%s/nodes/%s", apiPrefix, apiVersion, expectedMetrics.Name)
|
||||
|
||||
tf := cmdtesting.NewTestFactory().WithNamespace("test")
|
||||
defer tf.Cleanup()
|
||||
|
||||
codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...)
|
||||
ns := scheme.Codecs
|
||||
|
||||
tf.Client = &fake.RESTClient{
|
||||
NegotiatedSerializer: ns,
|
||||
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
|
||||
switch p, m := req.URL.Path, req.Method; {
|
||||
case p == "/api":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apibody)))}, nil
|
||||
case p == "/apis":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apisbodyWithMetrics)))}, nil
|
||||
case p == expectedNodePath && m == "GET":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, &expectedNode)}, nil
|
||||
default:
|
||||
t.Fatalf("unexpected request: %#v\nGot URL: %#v\n", req, req.URL)
|
||||
return nil, nil
|
||||
}
|
||||
}),
|
||||
}
|
||||
fakemetricsClientset := &metricsfake.Clientset{}
|
||||
fakemetricsClientset.AddReactor("get", "nodes", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
return true, &expectedMetrics, nil
|
||||
})
|
||||
tf.ClientConfigVal = cmdtesting.DefaultClientConfig()
|
||||
streams, _, buf, _ := genericclioptions.NewTestIOStreams()
|
||||
|
||||
cmd := NewCmdTopNode(tf, nil, streams)
|
||||
|
||||
// TODO in the long run, we want to test most of our commands like this. Wire the options struct with specific mocks
|
||||
// TODO then check the particular Run functionality and harvest results from fake clients
|
||||
cmdOptions := &TopNodeOptions{
|
||||
IOStreams: streams,
|
||||
}
|
||||
if err := cmdOptions.Complete(tf, cmd, []string{expectedMetrics.Name}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cmdOptions.MetricsClient = fakemetricsClientset
|
||||
if err := cmdOptions.Validate(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := cmdOptions.RunTopNode(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check the presence of node names in the output.
|
||||
result := buf.String()
|
||||
if !strings.Contains(result, expectedMetrics.Name) {
|
||||
t.Errorf("missing metrics for %s: \n%s", expectedMetrics.Name, result)
|
||||
}
|
||||
for _, m := range nonExpectedMetrics.Items {
|
||||
if strings.Contains(result, m.Name) {
|
||||
t.Errorf("unexpected metrics for %s: \n%s", m.Name, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTopNodeWithLabelSelectorMetricsFromMetricsServer(t *testing.T) {
|
||||
cmdtesting.InitTestErrorHandler(t)
|
||||
metrics, nodes := testNodeV1beta1MetricsData()
|
||||
expectedMetrics := &metricsv1beta1api.NodeMetricsList{
|
||||
ListMeta: metrics.ListMeta,
|
||||
Items: metrics.Items[0:1],
|
||||
}
|
||||
expectedNodes := v1.NodeList{
|
||||
ListMeta: nodes.ListMeta,
|
||||
Items: nodes.Items[0:1],
|
||||
}
|
||||
nonExpectedMetrics := &metricsv1beta1api.NodeMetricsList{
|
||||
ListMeta: metrics.ListMeta,
|
||||
Items: metrics.Items[1:],
|
||||
}
|
||||
label := "key=value"
|
||||
expectedNodePath := fmt.Sprintf("/%s/%s/nodes", apiPrefix, apiVersion)
|
||||
|
||||
tf := cmdtesting.NewTestFactory().WithNamespace("test")
|
||||
defer tf.Cleanup()
|
||||
|
||||
codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...)
|
||||
ns := scheme.Codecs
|
||||
|
||||
tf.Client = &fake.RESTClient{
|
||||
NegotiatedSerializer: ns,
|
||||
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
|
||||
switch p, m, _ := req.URL.Path, req.Method, req.URL.RawQuery; {
|
||||
case p == "/api":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apibody)))}, nil
|
||||
case p == "/apis":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apisbodyWithMetrics)))}, nil
|
||||
case p == expectedNodePath && m == "GET":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, &expectedNodes)}, nil
|
||||
default:
|
||||
t.Fatalf("unexpected request: %#v\nGot URL: %#v\n", req, req.URL)
|
||||
return nil, nil
|
||||
}
|
||||
}),
|
||||
}
|
||||
|
||||
fakemetricsClientset := &metricsfake.Clientset{}
|
||||
fakemetricsClientset.AddReactor("list", "nodes", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
return true, expectedMetrics, nil
|
||||
})
|
||||
tf.ClientConfigVal = cmdtesting.DefaultClientConfig()
|
||||
streams, _, buf, _ := genericclioptions.NewTestIOStreams()
|
||||
|
||||
cmd := NewCmdTopNode(tf, nil, streams)
|
||||
cmd.Flags().Set("selector", label)
|
||||
|
||||
// TODO in the long run, we want to test most of our commands like this. Wire the options struct with specific mocks
|
||||
// TODO then check the particular Run functionality and harvest results from fake clients
|
||||
cmdOptions := &TopNodeOptions{
|
||||
IOStreams: streams,
|
||||
}
|
||||
if err := cmdOptions.Complete(tf, cmd, []string{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cmdOptions.MetricsClient = fakemetricsClientset
|
||||
if err := cmdOptions.Validate(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := cmdOptions.RunTopNode(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check the presence of node names in the output.
|
||||
result := buf.String()
|
||||
for _, m := range expectedMetrics.Items {
|
||||
if !strings.Contains(result, m.Name) {
|
||||
t.Errorf("missing metrics for %s: \n%s", m.Name, result)
|
||||
}
|
||||
}
|
||||
for _, m := range nonExpectedMetrics.Items {
|
||||
if strings.Contains(result, m.Name) {
|
||||
t.Errorf("unexpected metrics for %s: \n%s", m.Name, result)
|
||||
}
|
||||
}
|
||||
}
|
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/top_pod.go (new file, generated, vendored, 265 lines)
@@ -0,0 +1,265 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package top
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/discovery"
|
||||
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
|
||||
"k8s.io/kubernetes/pkg/kubectl/metricsutil"
|
||||
"k8s.io/kubernetes/pkg/kubectl/util/i18n"
|
||||
"k8s.io/kubernetes/pkg/kubectl/util/templates"
|
||||
metricsapi "k8s.io/metrics/pkg/apis/metrics"
|
||||
metricsv1beta1api "k8s.io/metrics/pkg/apis/metrics/v1beta1"
|
||||
metricsclientset "k8s.io/metrics/pkg/client/clientset/versioned"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/cli-runtime/pkg/genericclioptions"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
type TopPodOptions struct {
|
||||
ResourceName string
|
||||
Namespace string
|
||||
Selector string
|
||||
AllNamespaces bool
|
||||
PrintContainers bool
|
||||
NoHeaders bool
|
||||
PodClient corev1client.PodsGetter
|
||||
HeapsterOptions HeapsterTopOptions
|
||||
Client *metricsutil.HeapsterMetricsClient
|
||||
Printer *metricsutil.TopCmdPrinter
|
||||
DiscoveryClient discovery.DiscoveryInterface
|
||||
MetricsClient metricsclientset.Interface
|
||||
|
||||
genericclioptions.IOStreams
|
||||
}
|
||||
|
||||
const metricsCreationDelay = 2 * time.Minute
|
||||
|
||||
var (
|
||||
topPodLong = templates.LongDesc(i18n.T(`
|
||||
Display Resource (CPU/Memory/Storage) usage of pods.
|
||||
|
||||
The 'top pod' command allows you to see the resource consumption of pods.
|
||||
|
||||
Due to the metrics pipeline delay, they may be unavailable for a few minutes
|
||||
since pod creation.`))
|
||||
|
||||
topPodExample = templates.Examples(i18n.T(`
|
||||
# Show metrics for all pods in the default namespace
|
||||
kubectl top pod
|
||||
|
||||
# Show metrics for all pods in the given namespace
|
||||
kubectl top pod --namespace=NAMESPACE
|
||||
|
||||
# Show metrics for a given pod and its containers
|
||||
kubectl top pod POD_NAME --containers
|
||||
|
||||
# Show metrics for the pods defined by label name=myLabel
|
||||
kubectl top pod -l name=myLabel`))
|
||||
)
|
||||
|
||||
func NewCmdTopPod(f cmdutil.Factory, o *TopPodOptions, streams genericclioptions.IOStreams) *cobra.Command {
|
||||
if o == nil {
|
||||
o = &TopPodOptions{
|
||||
IOStreams: streams,
|
||||
}
|
||||
}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "pod [NAME | -l label]",
|
||||
DisableFlagsInUseLine: true,
|
||||
Short: i18n.T("Display Resource (CPU/Memory/Storage) usage of pods"),
|
||||
Long: topPodLong,
|
||||
Example: topPodExample,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
cmdutil.CheckErr(o.Complete(f, cmd, args))
|
||||
cmdutil.CheckErr(o.Validate())
|
||||
cmdutil.CheckErr(o.RunTopPod())
|
||||
},
|
||||
Aliases: []string{"pods", "po"},
|
||||
}
|
||||
cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
|
||||
cmd.Flags().BoolVar(&o.PrintContainers, "containers", o.PrintContainers, "If present, print usage of containers within a pod.")
|
||||
cmd.Flags().BoolVar(&o.AllNamespaces, "all-namespaces", o.AllNamespaces, "If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.")
|
||||
cmd.Flags().BoolVar(&o.NoHeaders, "no-headers", o.NoHeaders, "If present, print output without headers.")
|
||||
o.HeapsterOptions.Bind(cmd.Flags())
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (o *TopPodOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error {
|
||||
var err error
|
||||
if len(args) == 1 {
|
||||
o.ResourceName = args[0]
|
||||
} else if len(args) > 1 {
|
||||
return cmdutil.UsageErrorf(cmd, "%s", cmd.Use)
|
||||
}
|
||||
|
||||
o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clientset, err := f.KubernetesClientSet()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
o.DiscoveryClient = clientset.DiscoveryClient
|
||||
config, err := f.ToRESTConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.MetricsClient, err = metricsclientset.NewForConfig(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
o.PodClient = clientset.CoreV1()
|
||||
o.Client = metricsutil.NewHeapsterMetricsClient(clientset.CoreV1(), o.HeapsterOptions.Namespace, o.HeapsterOptions.Scheme, o.HeapsterOptions.Service, o.HeapsterOptions.Port)
|
||||
|
||||
o.Printer = metricsutil.NewTopCmdPrinter(o.Out)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *TopPodOptions) Validate() error {
|
||||
if len(o.ResourceName) > 0 && len(o.Selector) > 0 {
|
||||
return errors.New("only one of NAME or --selector can be provided")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o TopPodOptions) RunTopPod() error {
|
||||
var err error
|
||||
selector := labels.Everything()
|
||||
if len(o.Selector) > 0 {
|
||||
selector, err = labels.Parse(o.Selector)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
apiGroups, err := o.DiscoveryClient.ServerGroups()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
metricsAPIAvailable := SupportedMetricsAPIVersionAvailable(apiGroups)
|
||||
|
||||
metrics := &metricsapi.PodMetricsList{}
|
||||
if metricsAPIAvailable {
|
||||
metrics, err = getMetricsFromMetricsAPI(o.MetricsClient, o.Namespace, o.ResourceName, o.AllNamespaces, selector)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
metrics, err = o.Client.GetPodMetrics(o.Namespace, o.ResourceName, o.AllNamespaces, selector)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Refactor this once Heapster becomes the API server.
|
||||
// First we check why no metrics have been received.
|
||||
if len(metrics.Items) == 0 {
|
||||
// If the API server query is successful but all the pods are newly created,
|
||||
// the metrics are probably not ready yet, so we return the error here in the first place.
|
||||
e := verifyEmptyMetrics(o, selector)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return o.Printer.PrintPodMetrics(metrics.Items, o.PrintContainers, o.AllNamespaces, o.NoHeaders)
|
||||
}
|
||||
|
||||
func getMetricsFromMetricsAPI(metricsClient metricsclientset.Interface, namespace, resourceName string, allNamespaces bool, selector labels.Selector) (*metricsapi.PodMetricsList, error) {
|
||||
var err error
|
||||
ns := metav1.NamespaceAll
|
||||
if !allNamespaces {
|
||||
ns = namespace
|
||||
}
|
||||
versionedMetrics := &metricsv1beta1api.PodMetricsList{}
|
||||
if resourceName != "" {
|
||||
m, err := metricsClient.Metrics().PodMetricses(ns).Get(resourceName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
versionedMetrics.Items = []metricsv1beta1api.PodMetrics{*m}
|
||||
} else {
|
||||
versionedMetrics, err = metricsClient.Metrics().PodMetricses(ns).List(metav1.ListOptions{LabelSelector: selector.String()})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
metrics := &metricsapi.PodMetricsList{}
|
||||
err = metricsv1beta1api.Convert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(versionedMetrics, metrics, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return metrics, nil
|
||||
}
|
||||
|
||||
func verifyEmptyMetrics(o TopPodOptions, selector labels.Selector) error {
|
||||
if len(o.ResourceName) > 0 {
|
||||
pod, err := o.PodClient.Pods(o.Namespace).Get(o.ResourceName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := checkPodAge(pod); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
pods, err := o.PodClient.Pods(o.Namespace).List(metav1.ListOptions{
|
||||
LabelSelector: selector.String(),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(pods.Items) == 0 {
|
||||
return nil
|
||||
}
|
||||
for _, pod := range pods.Items {
|
||||
if err := checkPodAge(&pod); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return errors.New("metrics not available yet")
|
||||
}
|
||||
|
||||
func checkPodAge(pod *v1.Pod) error {
|
||||
age := time.Since(pod.CreationTimestamp.Time)
|
||||
if age > metricsCreationDelay {
|
||||
message := fmt.Sprintf("Metrics not available for pod %s/%s, age: %s", pod.Namespace, pod.Name, age.String())
|
||||
klog.Warningf(message)
|
||||
return errors.New(message)
|
||||
} else {
|
||||
klog.V(2).Infof("Metrics not yet available for pod %s/%s, age: %s", pod.Namespace, pod.Name, age.String())
|
||||
return nil
|
||||
}
|
||||
}
|
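top_pod.go above treats an empty metrics result differently depending on pod age: verifyEmptyMetrics only reports an error for pods older than metricsCreationDelay, since younger pods may simply not have been scraped by the metrics pipeline yet. A standalone sketch of that age test (the two-minute constant mirrors the vendored code; the pod object is fabricated for illustration):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const metricsCreationDelay = 2 * time.Minute

// metricsExpected reports whether a pod is old enough that missing
// metrics should be treated as an error rather than pipeline lag.
func metricsExpected(pod *v1.Pod) bool {
	return time.Since(pod.CreationTimestamp.Time) > metricsCreationDelay
}

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:              "demo",
		Namespace:         "default",
		CreationTimestamp: metav1.NewTime(time.Now().Add(-30 * time.Second)),
	}}
	// A 30-second-old pod: prints false, i.e. missing metrics are
	// probably just not available yet, so no error is raised.
	fmt.Println(metricsExpected(pod))
}
```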
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/top_pod_test.go (new file, generated, vendored, 731 lines)
@@ -0,0 +1,731 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package top
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/googleapis/gnostic/OpenAPIv2"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
apiversion "k8s.io/apimachinery/pkg/version"
|
||||
"k8s.io/cli-runtime/pkg/genericclioptions"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/rest/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing"
|
||||
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
|
||||
"k8s.io/kubernetes/pkg/kubectl/scheme"
|
||||
metricsv1alpha1api "k8s.io/metrics/pkg/apis/metrics/v1alpha1"
|
||||
metricsv1beta1api "k8s.io/metrics/pkg/apis/metrics/v1beta1"
|
||||
metricsfake "k8s.io/metrics/pkg/client/clientset/versioned/fake"
|
||||
)
|
||||
|
||||
const (
|
||||
topPathPrefix = baseMetricsAddress + "/" + metricsApiVersion
|
||||
topMetricsAPIPathPrefix = "/apis/metrics.k8s.io/v1beta1"
|
||||
apibody = `{
|
||||
"kind": "APIVersions",
|
||||
"versions": [
|
||||
"v1"
|
||||
],
|
||||
"serverAddressByClientCIDRs": [
|
||||
{
|
||||
"clientCIDR": "0.0.0.0/0",
|
||||
"serverAddress": "10.0.2.15:8443"
|
||||
}
|
||||
]
|
||||
}`
|
||||
// This is not the full output one would usually get, just a trimmed down version.
|
||||
apisbody = `{
|
||||
"kind": "APIGroupList",
|
||||
"apiVersion": "v1",
|
||||
"groups": [{}]
|
||||
}`
|
||||
|
||||
apisbodyWithMetrics = `{
|
||||
"kind": "APIGroupList",
|
||||
"apiVersion": "v1",
|
||||
"groups": [
|
||||
{
|
||||
"name":"metrics.k8s.io",
|
||||
"versions":[
|
||||
{
|
||||
"groupVersion":"metrics.k8s.io/v1beta1",
|
||||
"version":"v1beta1"
|
||||
}
|
||||
],
|
||||
"preferredVersion":{
|
||||
"groupVersion":"metrics.k8s.io/v1beta1",
|
||||
"version":"v1beta1"
|
||||
},
|
||||
"serverAddressByClientCIDRs":null
|
||||
}
|
||||
]
|
||||
}`
|
||||
)
|
||||
|
||||
func TestTopPod(t *testing.T) {
|
||||
testNS := "testns"
|
||||
testCases := []struct {
|
||||
name string
|
||||
flags map[string]string
|
||||
args []string
|
||||
expectedPath string
|
||||
expectedQuery string
|
||||
namespaces []string
|
||||
containers bool
|
||||
listsNamespaces bool
|
||||
}{
|
||||
{
|
||||
name: "all namespaces",
|
||||
flags: map[string]string{"all-namespaces": "true"},
|
||||
expectedPath: topPathPrefix + "/pods",
|
||||
namespaces: []string{testNS, "secondtestns", "thirdtestns"},
|
||||
listsNamespaces: true,
|
||||
},
|
||||
{
|
||||
name: "all in namespace",
|
||||
expectedPath: topPathPrefix + "/namespaces/" + testNS + "/pods",
|
||||
namespaces: []string{testNS, testNS},
|
||||
},
|
||||
{
|
||||
name: "pod with name",
|
||||
args: []string{"pod1"},
|
||||
expectedPath: topPathPrefix + "/namespaces/" + testNS + "/pods/pod1",
|
||||
namespaces: []string{testNS},
|
||||
},
|
||||
{
|
||||
name: "pod with label selector",
|
||||
flags: map[string]string{"selector": "key=value"},
|
||||
expectedPath: topPathPrefix + "/namespaces/" + testNS + "/pods",
|
||||
expectedQuery: "labelSelector=" + url.QueryEscape("key=value"),
|
||||
namespaces: []string{testNS, testNS},
|
||||
},
|
||||
{
|
||||
name: "pod with container metrics",
|
||||
flags: map[string]string{"containers": "true"},
|
||||
args: []string{"pod1"},
|
||||
expectedPath: topPathPrefix + "/namespaces/" + testNS + "/pods/pod1",
|
||||
namespaces: []string{testNS},
|
||||
containers: true,
|
||||
},
|
||||
{
|
||||
name: "no-headers set",
|
||||
flags: map[string]string{"containers": "true", "no-headers": "true"},
|
||||
args: []string{"pod1"},
|
||||
expectedPath: topPathPrefix + "/namespaces/" + testNS + "/pods/pod1",
|
||||
namespaces: []string{testNS},
|
||||
containers: true,
|
||||
},
|
||||
}
|
||||
cmdtesting.InitTestErrorHandler(t)
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
t.Logf("Running test case: %s", testCase.name)
|
||||
metricsList := testPodMetricsData()
|
||||
var expectedMetrics []metricsv1alpha1api.PodMetrics
|
||||
var expectedContainerNames, nonExpectedMetricsNames []string
|
||||
for n, m := range metricsList {
|
||||
if n < len(testCase.namespaces) {
|
||||
m.Namespace = testCase.namespaces[n]
|
||||
expectedMetrics = append(expectedMetrics, m)
|
||||
for _, c := range m.Containers {
|
||||
expectedContainerNames = append(expectedContainerNames, c.Name)
|
||||
}
|
||||
} else {
|
||||
nonExpectedMetricsNames = append(nonExpectedMetricsNames, m.Name)
|
||||
}
|
||||
}
|
||||
|
||||
var response interface{}
|
||||
if len(expectedMetrics) == 1 {
|
||||
response = expectedMetrics[0]
|
||||
} else {
|
||||
response = metricsv1alpha1api.PodMetricsList{
|
||||
ListMeta: metav1.ListMeta{
|
||||
ResourceVersion: "2",
|
||||
},
|
||||
Items: expectedMetrics,
|
||||
}
|
||||
}
|
||||
|
||||
tf := cmdtesting.NewTestFactory().WithNamespace(testNS)
|
||||
defer tf.Cleanup()
|
||||
|
||||
ns := scheme.Codecs
|
||||
|
||||
tf.Client = &fake.RESTClient{
|
||||
NegotiatedSerializer: ns,
|
||||
Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
|
||||
switch p, m, q := req.URL.Path, req.Method, req.URL.RawQuery; {
|
||||
case p == "/api":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apibody)))}, nil
|
||||
case p == "/apis":
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apisbody)))}, nil
|
||||
case p == testCase.expectedPath && m == "GET" && (testCase.expectedQuery == "" || q == testCase.expectedQuery):
|
||||
body, err := marshallBody(response)
|
||||
if err != nil {
|
||||
t.Errorf("%s: unexpected error: %v", testCase.name, err)
|
||||
}
|
||||
return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: body}, nil
|
||||
default:
|
||||
t.Fatalf("%s: unexpected request: %#v\nGot URL: %#v\nExpected path: %#v\nExpected query: %#v",
|
||||
testCase.name, req, req.URL, testCase.expectedPath, testCase.expectedQuery)
|
||||
return nil, nil
|
||||
}
|
||||
}),
|
||||
}
|
||||
tf.ClientConfigVal = cmdtesting.DefaultClientConfig()
|
||||
streams, _, buf, _ := genericclioptions.NewTestIOStreams()
|
||||
|
||||
cmd := NewCmdTopPod(tf, nil, streams)
|
||||
for name, value := range testCase.flags {
|
||||
cmd.Flags().Set(name, value)
|
||||
}
|
||||
cmd.Run(cmd, testCase.args)
|
||||
|
||||
// Check the presence of pod names&namespaces/container names in the output.
|
||||
result := buf.String()
|
||||
if testCase.containers {
|
||||
for _, containerName := range expectedContainerNames {
|
||||
if !strings.Contains(result, containerName) {
|
||||
t.Errorf("%s: missing metrics for container %s: \n%s", testCase.name, containerName, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, m := range expectedMetrics {
|
||||
if !strings.Contains(result, m.Name) {
|
||||
t.Errorf("%s: missing metrics for %s: \n%s", testCase.name, m.Name, result)
|
||||
}
|
||||
if testCase.listsNamespaces && !strings.Contains(result, m.Namespace) {
|
||||
t.Errorf("%s: missing metrics for %s/%s: \n%s", testCase.name, m.Namespace, m.Name, result)
|
||||
}
|
||||
}
|
||||
for _, name := range nonExpectedMetricsNames {
|
||||
if strings.Contains(result, name) {
|
||||
t.Errorf("%s: unexpected metrics for %s: \n%s", testCase.name, name, result)
|
||||
}
|
||||
}
|
||||
if cmdutil.GetFlagBool(cmd, "no-headers") && strings.Contains(result, "MEMORY") {
|
||||
t.Errorf("%s: unexpected headers with no-headers option set: \n%s", testCase.name, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTopPodWithMetricsServer(t *testing.T) {
    testNS := "testns"
    testCases := []struct {
        name            string
        namespace       string
        options         *TopPodOptions
        args            []string
        expectedPath    string
        expectedQuery   string
        namespaces      []string
        containers      bool
        listsNamespaces bool
    }{
        {
            name:            "all namespaces",
            options:         &TopPodOptions{AllNamespaces: true},
            expectedPath:    topMetricsAPIPathPrefix + "/pods",
            namespaces:      []string{testNS, "secondtestns", "thirdtestns"},
            listsNamespaces: true,
        },
        {
            name:         "all in namespace",
            expectedPath: topMetricsAPIPathPrefix + "/namespaces/" + testNS + "/pods",
            namespaces:   []string{testNS, testNS},
        },
        {
            name:         "pod with name",
            args:         []string{"pod1"},
            expectedPath: topMetricsAPIPathPrefix + "/namespaces/" + testNS + "/pods/pod1",
            namespaces:   []string{testNS},
        },
        {
            name:          "pod with label selector",
            options:       &TopPodOptions{Selector: "key=value"},
            expectedPath:  topMetricsAPIPathPrefix + "/namespaces/" + testNS + "/pods",
            expectedQuery: "labelSelector=" + url.QueryEscape("key=value"),
            namespaces:    []string{testNS, testNS},
        },
        {
            name:         "pod with container metrics",
            options:      &TopPodOptions{PrintContainers: true},
            args:         []string{"pod1"},
            expectedPath: topMetricsAPIPathPrefix + "/namespaces/" + testNS + "/pods/pod1",
            namespaces:   []string{testNS},
            containers:   true,
        },
    }
    cmdtesting.InitTestErrorHandler(t)
    for _, testCase := range testCases {
        t.Run(testCase.name, func(t *testing.T) {
            metricsList := testV1beta1PodMetricsData()
            var expectedMetrics []metricsv1beta1api.PodMetrics
            var expectedContainerNames, nonExpectedMetricsNames []string
            for n, m := range metricsList {
                if n < len(testCase.namespaces) {
                    m.Namespace = testCase.namespaces[n]
                    expectedMetrics = append(expectedMetrics, m)
                    for _, c := range m.Containers {
                        expectedContainerNames = append(expectedContainerNames, c.Name)
                    }
                } else {
                    nonExpectedMetricsNames = append(nonExpectedMetricsNames, m.Name)
                }
            }

            fakemetricsClientset := &metricsfake.Clientset{}

            if len(expectedMetrics) == 1 {
                fakemetricsClientset.AddReactor("get", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
                    return true, &expectedMetrics[0], nil
                })
            } else {
                fakemetricsClientset.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
                    res := &metricsv1beta1api.PodMetricsList{
                        ListMeta: metav1.ListMeta{
                            ResourceVersion: "2",
                        },
                        Items: expectedMetrics,
                    }
                    return true, res, nil
                })
            }

            tf := cmdtesting.NewTestFactory().WithNamespace(testNS)
            defer tf.Cleanup()

            ns := scheme.Codecs

            tf.Client = &fake.RESTClient{
                NegotiatedSerializer: ns,
                Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
                    switch p := req.URL.Path; {
                    case p == "/api":
                        return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apibody)))}, nil
                    case p == "/apis":
                        return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apisbodyWithMetrics)))}, nil
                    default:
                        t.Fatalf("%s: unexpected request: %#v\nGot URL: %#v",
                            testCase.name, req, req.URL)
                        return nil, nil
                    }
                }),
            }
            tf.ClientConfigVal = cmdtesting.DefaultClientConfig()
            streams, _, buf, _ := genericclioptions.NewTestIOStreams()

            cmd := NewCmdTopPod(tf, nil, streams)
            var cmdOptions *TopPodOptions
            if testCase.options != nil {
                cmdOptions = testCase.options
            } else {
                cmdOptions = &TopPodOptions{}
            }
            cmdOptions.IOStreams = streams

            // TODO in the long run, we want to test most of our commands like this. Wire the options struct with specific mocks
            // TODO then check the particular Run functionality and harvest results from fake clients. We probably end up skipping the factory altogether.
            if err := cmdOptions.Complete(tf, cmd, testCase.args); err != nil {
                t.Fatal(err)
            }
            cmdOptions.MetricsClient = fakemetricsClientset
            if err := cmdOptions.Validate(); err != nil {
                t.Fatal(err)
            }
            if err := cmdOptions.RunTopPod(); err != nil {
                t.Fatal(err)
            }

            // Check the presence of pod names&namespaces/container names in the output.
            result := buf.String()
            if testCase.containers {
                for _, containerName := range expectedContainerNames {
                    if !strings.Contains(result, containerName) {
                        t.Errorf("missing metrics for container %s: \n%s", containerName, result)
                    }
                }
            }
            for _, m := range expectedMetrics {
                if !strings.Contains(result, m.Name) {
                    t.Errorf("missing metrics for %s: \n%s", m.Name, result)
                }
                if testCase.listsNamespaces && !strings.Contains(result, m.Namespace) {
                    t.Errorf("missing metrics for %s/%s: \n%s", m.Namespace, m.Name, result)
                }
            }
            for _, name := range nonExpectedMetricsNames {
                if strings.Contains(result, name) {
                    t.Errorf("unexpected metrics for %s: \n%s", name, result)
                }
            }
        })
    }
}

// fakeDiscovery is a no-op discovery client: every method returns nil. It is
// used to satisfy the DiscoveryClient dependency of TopPodOptions in these tests.
type fakeDiscovery struct{}

// ServerGroups returns the supported groups, with information like supported versions and the
// preferred version.
func (d *fakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) {
    return nil, nil
}

// ServerResourcesForGroupVersion returns the supported resources for a group and version.
func (d *fakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
    return nil, nil
}

// ServerResources returns the supported resources for all groups and versions.
func (d *fakeDiscovery) ServerResources() ([]*metav1.APIResourceList, error) {
    return nil, nil
}

// ServerPreferredResources returns the supported resources with the version preferred by the
// server.
func (d *fakeDiscovery) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
    return nil, nil
}

// ServerPreferredNamespacedResources returns the supported namespaced resources with the
// version preferred by the server.
func (d *fakeDiscovery) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) {
    return nil, nil
}

// ServerVersion retrieves and parses the server's version (git version).
func (d *fakeDiscovery) ServerVersion() (*apiversion.Info, error) {
    return nil, nil
}

// OpenAPISchema retrieves and parses the swagger API schema the server supports.
func (d *fakeDiscovery) OpenAPISchema() (*openapi_v2.Document, error) {
    return nil, nil
}

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (d *fakeDiscovery) RESTClient() restclient.Interface {
    return nil
}

func TestTopPodCustomDefaults(t *testing.T) {
    customBaseHeapsterServiceAddress := "/api/v1/namespaces/custom-namespace/services/https:custom-heapster-service:/proxy"
    customBaseMetricsAddress := customBaseHeapsterServiceAddress + "/apis/metrics"
    customTopPathPrefix := customBaseMetricsAddress + "/" + metricsApiVersion

    testNS := "custom-namespace"
    testCases := []struct {
        name            string
        flags           map[string]string
        args            []string
        expectedPath    string
        expectedQuery   string
        namespaces      []string
        containers      bool
        listsNamespaces bool
    }{
        {
            name:            "all namespaces",
            flags:           map[string]string{"all-namespaces": "true"},
            expectedPath:    customTopPathPrefix + "/pods",
            namespaces:      []string{testNS, "secondtestns", "thirdtestns"},
            listsNamespaces: true,
        },
        {
            name:         "all in namespace",
            expectedPath: customTopPathPrefix + "/namespaces/" + testNS + "/pods",
            namespaces:   []string{testNS, testNS},
        },
        {
            name:         "pod with name",
            args:         []string{"pod1"},
            expectedPath: customTopPathPrefix + "/namespaces/" + testNS + "/pods/pod1",
            namespaces:   []string{testNS},
        },
        {
            name:          "pod with label selector",
            flags:         map[string]string{"selector": "key=value"},
            expectedPath:  customTopPathPrefix + "/namespaces/" + testNS + "/pods",
            expectedQuery: "labelSelector=" + url.QueryEscape("key=value"),
            namespaces:    []string{testNS, testNS},
        },
        {
            name:         "pod with container metrics",
            flags:        map[string]string{"containers": "true"},
            args:         []string{"pod1"},
            expectedPath: customTopPathPrefix + "/namespaces/" + testNS + "/pods/pod1",
            namespaces:   []string{testNS},
            containers:   true,
        },
    }
    cmdtesting.InitTestErrorHandler(t)
    for _, testCase := range testCases {
        t.Run(testCase.name, func(t *testing.T) {
            t.Logf("Running test case: %s", testCase.name)
            metricsList := testPodMetricsData()
            var expectedMetrics []metricsv1alpha1api.PodMetrics
            var expectedContainerNames, nonExpectedMetricsNames []string
            for n, m := range metricsList {
                if n < len(testCase.namespaces) {
                    m.Namespace = testCase.namespaces[n]
                    expectedMetrics = append(expectedMetrics, m)
                    for _, c := range m.Containers {
                        expectedContainerNames = append(expectedContainerNames, c.Name)
                    }
                } else {
                    nonExpectedMetricsNames = append(nonExpectedMetricsNames, m.Name)
                }
            }

            var response interface{}
            if len(expectedMetrics) == 1 {
                response = expectedMetrics[0]
            } else {
                response = metricsv1alpha1api.PodMetricsList{
                    ListMeta: metav1.ListMeta{
                        ResourceVersion: "2",
                    },
                    Items: expectedMetrics,
                }
            }

            tf := cmdtesting.NewTestFactory().WithNamespace(testNS)
            defer tf.Cleanup()

            ns := scheme.Codecs

            tf.Client = &fake.RESTClient{
                NegotiatedSerializer: ns,
                Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
                    switch p, m, q := req.URL.Path, req.Method, req.URL.RawQuery; {
                    case p == "/api":
                        return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apibody)))}, nil
                    case p == "/apis":
                        return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte(apisbody)))}, nil
                    case p == testCase.expectedPath && m == "GET" && (testCase.expectedQuery == "" || q == testCase.expectedQuery):
                        body, err := marshallBody(response)
                        if err != nil {
                            t.Errorf("%s: unexpected error: %v", testCase.name, err)
                        }
                        return &http.Response{StatusCode: 200, Header: cmdtesting.DefaultHeader(), Body: body}, nil
                    default:
                        t.Fatalf("%s: unexpected request: %#v\nGot URL: %#v\nExpected path: %#v\nExpected query: %#v",
                            testCase.name, req, req.URL, testCase.expectedPath, testCase.expectedQuery)
                        return nil, nil
                    }
                }),
            }
            tf.ClientConfigVal = cmdtesting.DefaultClientConfig()
            streams, _, buf, _ := genericclioptions.NewTestIOStreams()

            opts := &TopPodOptions{
                HeapsterOptions: HeapsterTopOptions{
                    Namespace: "custom-namespace",
                    Scheme:    "https",
                    Service:   "custom-heapster-service",
                },
                DiscoveryClient: &fakeDiscovery{},
                IOStreams:       streams,
            }
            cmd := NewCmdTopPod(tf, opts, streams)
            for name, value := range testCase.flags {
                cmd.Flags().Set(name, value)
            }
            cmd.Run(cmd, testCase.args)

            // Check the presence of pod names&namespaces/container names in the output.
            result := buf.String()
            if testCase.containers {
                for _, containerName := range expectedContainerNames {
                    if !strings.Contains(result, containerName) {
                        t.Errorf("%s: missing metrics for container %s: \n%s", testCase.name, containerName, result)
                    }
                }
            }
            for _, m := range expectedMetrics {
                if !strings.Contains(result, m.Name) {
                    t.Errorf("%s: missing metrics for %s: \n%s", testCase.name, m.Name, result)
                }
                if testCase.listsNamespaces && !strings.Contains(result, m.Namespace) {
                    t.Errorf("%s: missing metrics for %s/%s: \n%s", testCase.name, m.Namespace, m.Name, result)
                }
            }
            for _, name := range nonExpectedMetricsNames {
                if strings.Contains(result, name) {
                    t.Errorf("%s: unexpected metrics for %s: \n%s", testCase.name, name, result)
                }
            }
        })
    }
}

func testV1beta1PodMetricsData() []metricsv1beta1api.PodMetrics {
    return []metricsv1beta1api.PodMetrics{
        {
            ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "test", ResourceVersion: "10", Labels: map[string]string{"key": "value"}},
            Window:     metav1.Duration{Duration: time.Minute},
            Containers: []metricsv1beta1api.ContainerMetrics{
                {
                    Name: "container1-1",
                    Usage: v1.ResourceList{
                        v1.ResourceCPU:     *resource.NewMilliQuantity(1, resource.DecimalSI),
                        v1.ResourceMemory:  *resource.NewQuantity(2*(1024*1024), resource.DecimalSI),
                        v1.ResourceStorage: *resource.NewQuantity(3*(1024*1024), resource.DecimalSI),
                    },
                },
                {
                    Name: "container1-2",
                    Usage: v1.ResourceList{
                        v1.ResourceCPU:     *resource.NewMilliQuantity(4, resource.DecimalSI),
                        v1.ResourceMemory:  *resource.NewQuantity(5*(1024*1024), resource.DecimalSI),
                        v1.ResourceStorage: *resource.NewQuantity(6*(1024*1024), resource.DecimalSI),
                    },
                },
            },
        },
        {
            ObjectMeta: metav1.ObjectMeta{Name: "pod2", Namespace: "test", ResourceVersion: "11", Labels: map[string]string{"key": "value"}},
            Window:     metav1.Duration{Duration: time.Minute},
            Containers: []metricsv1beta1api.ContainerMetrics{
                {
                    Name: "container2-1",
                    Usage: v1.ResourceList{
                        v1.ResourceCPU:     *resource.NewMilliQuantity(7, resource.DecimalSI),
                        v1.ResourceMemory:  *resource.NewQuantity(8*(1024*1024), resource.DecimalSI),
                        v1.ResourceStorage: *resource.NewQuantity(9*(1024*1024), resource.DecimalSI),
                    },
                },
                {
                    Name: "container2-2",
                    Usage: v1.ResourceList{
                        v1.ResourceCPU:     *resource.NewMilliQuantity(10, resource.DecimalSI),
                        v1.ResourceMemory:  *resource.NewQuantity(11*(1024*1024), resource.DecimalSI),
                        v1.ResourceStorage: *resource.NewQuantity(12*(1024*1024), resource.DecimalSI),
                    },
                },
                {
                    Name: "container2-3",
                    Usage: v1.ResourceList{
                        v1.ResourceCPU:     *resource.NewMilliQuantity(13, resource.DecimalSI),
                        v1.ResourceMemory:  *resource.NewQuantity(14*(1024*1024), resource.DecimalSI),
                        v1.ResourceStorage: *resource.NewQuantity(15*(1024*1024), resource.DecimalSI),
                    },
                },
            },
        },
        {
            ObjectMeta: metav1.ObjectMeta{Name: "pod3", Namespace: "test", ResourceVersion: "12"},
            Window:     metav1.Duration{Duration: time.Minute},
            Containers: []metricsv1beta1api.ContainerMetrics{
                {
                    Name: "container3-1",
                    Usage: v1.ResourceList{
                        v1.ResourceCPU:     *resource.NewMilliQuantity(7, resource.DecimalSI),
                        v1.ResourceMemory:  *resource.NewQuantity(8*(1024*1024), resource.DecimalSI),
                        v1.ResourceStorage: *resource.NewQuantity(9*(1024*1024), resource.DecimalSI),
                    },
                },
            },
        },
    }
}

func testPodMetricsData() []metricsv1alpha1api.PodMetrics {
    return []metricsv1alpha1api.PodMetrics{
        {
            ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "test", ResourceVersion: "10"},
            Window:     metav1.Duration{Duration: time.Minute},
            Containers: []metricsv1alpha1api.ContainerMetrics{
                {
                    Name: "container1-1",
                    Usage: v1.ResourceList{
                        v1.ResourceCPU:     *resource.NewMilliQuantity(1, resource.DecimalSI),
                        v1.ResourceMemory:  *resource.NewQuantity(2*(1024*1024), resource.DecimalSI),
                        v1.ResourceStorage: *resource.NewQuantity(3*(1024*1024), resource.DecimalSI),
                    },
                },
                {
                    Name: "container1-2",
                    Usage: v1.ResourceList{
                        v1.ResourceCPU:     *resource.NewMilliQuantity(4, resource.DecimalSI),
                        v1.ResourceMemory:  *resource.NewQuantity(5*(1024*1024), resource.DecimalSI),
                        v1.ResourceStorage: *resource.NewQuantity(6*(1024*1024), resource.DecimalSI),
                    },
                },
            },
        },
        {
            ObjectMeta: metav1.ObjectMeta{Name: "pod2", Namespace: "test", ResourceVersion: "11"},
            Window:     metav1.Duration{Duration: time.Minute},
            Containers: []metricsv1alpha1api.ContainerMetrics{
                {
                    Name: "container2-1",
                    Usage: v1.ResourceList{
                        v1.ResourceCPU:     *resource.NewMilliQuantity(7, resource.DecimalSI),
                        v1.ResourceMemory:  *resource.NewQuantity(8*(1024*1024), resource.DecimalSI),
                        v1.ResourceStorage: *resource.NewQuantity(9*(1024*1024), resource.DecimalSI),
                    },
                },
                {
                    Name: "container2-2",
                    Usage: v1.ResourceList{
                        v1.ResourceCPU:     *resource.NewMilliQuantity(10, resource.DecimalSI),
                        v1.ResourceMemory:  *resource.NewQuantity(11*(1024*1024), resource.DecimalSI),
                        v1.ResourceStorage: *resource.NewQuantity(12*(1024*1024), resource.DecimalSI),
                    },
                },
                {
                    Name: "container2-3",
                    Usage: v1.ResourceList{
                        v1.ResourceCPU:     *resource.NewMilliQuantity(13, resource.DecimalSI),
                        v1.ResourceMemory:  *resource.NewQuantity(14*(1024*1024), resource.DecimalSI),
                        v1.ResourceStorage: *resource.NewQuantity(15*(1024*1024), resource.DecimalSI),
                    },
                },
            },
        },
        {
            ObjectMeta: metav1.ObjectMeta{Name: "pod3", Namespace: "test", ResourceVersion: "12"},
            Window:     metav1.Duration{Duration: time.Minute},
            Containers: []metricsv1alpha1api.ContainerMetrics{
                {
                    Name: "container3-1",
                    Usage: v1.ResourceList{
                        v1.ResourceCPU:     *resource.NewMilliQuantity(7, resource.DecimalSI),
                        v1.ResourceMemory:  *resource.NewQuantity(8*(1024*1024), resource.DecimalSI),
                        v1.ResourceStorage: *resource.NewQuantity(9*(1024*1024), resource.DecimalSI),
                    },
                },
            },
        },
    }
}
174
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/top/top_test.go
generated
vendored
Normal file
@ -0,0 +1,174 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package top

import (
    "bytes"
    "encoding/json"
    "io"
    "io/ioutil"
    "time"

    "testing"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/cli-runtime/pkg/genericclioptions"
    cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing"
    metricsv1alpha1api "k8s.io/metrics/pkg/apis/metrics/v1alpha1"
    metricsv1beta1api "k8s.io/metrics/pkg/apis/metrics/v1beta1"
)

const (
    baseHeapsterServiceAddress = "/api/v1/namespaces/kube-system/services/http:heapster:/proxy"
    baseMetricsAddress         = baseHeapsterServiceAddress + "/apis/metrics"
    baseMetricsServerAddress   = "/apis/metrics.k8s.io/v1beta1"
    metricsApiVersion          = "v1alpha1"
)

func TestTopSubcommandsExist(t *testing.T) {
    cmdtesting.InitTestErrorHandler(t)

    f := cmdtesting.NewTestFactory()
    defer f.Cleanup()

    cmd := NewCmdTop(f, genericclioptions.NewTestIOStreamsDiscard())
    if !cmd.HasSubCommands() {
        t.Error("top command should have subcommands")
    }
}

func marshallBody(metrics interface{}) (io.ReadCloser, error) {
    result, err := json.Marshal(metrics)
    if err != nil {
        return nil, err
    }
    return ioutil.NopCloser(bytes.NewReader(result)), nil
}

func testNodeV1alpha1MetricsData() (*metricsv1alpha1api.NodeMetricsList, *v1.NodeList) {
    metrics := &metricsv1alpha1api.NodeMetricsList{
        ListMeta: metav1.ListMeta{
            ResourceVersion: "1",
        },
        Items: []metricsv1alpha1api.NodeMetrics{
            {
                ObjectMeta: metav1.ObjectMeta{Name: "node1", ResourceVersion: "10"},
                Window:     metav1.Duration{Duration: time.Minute},
                Usage: v1.ResourceList{
                    v1.ResourceCPU:     *resource.NewMilliQuantity(1, resource.DecimalSI),
                    v1.ResourceMemory:  *resource.NewQuantity(2*(1024*1024), resource.DecimalSI),
                    v1.ResourceStorage: *resource.NewQuantity(3*(1024*1024), resource.DecimalSI),
                },
            },
            {
                ObjectMeta: metav1.ObjectMeta{Name: "node2", ResourceVersion: "11"},
                Window:     metav1.Duration{Duration: time.Minute},
                Usage: v1.ResourceList{
                    v1.ResourceCPU:     *resource.NewMilliQuantity(5, resource.DecimalSI),
                    v1.ResourceMemory:  *resource.NewQuantity(6*(1024*1024), resource.DecimalSI),
                    v1.ResourceStorage: *resource.NewQuantity(7*(1024*1024), resource.DecimalSI),
                },
            },
        },
    }
    nodes := &v1.NodeList{
        ListMeta: metav1.ListMeta{
            ResourceVersion: "15",
        },
        Items: []v1.Node{
            {
                ObjectMeta: metav1.ObjectMeta{Name: "node1", ResourceVersion: "10"},
                Status: v1.NodeStatus{
                    Allocatable: v1.ResourceList{
                        v1.ResourceCPU:     *resource.NewMilliQuantity(10, resource.DecimalSI),
                        v1.ResourceMemory:  *resource.NewQuantity(20*(1024*1024), resource.DecimalSI),
                        v1.ResourceStorage: *resource.NewQuantity(30*(1024*1024), resource.DecimalSI),
                    },
                },
            },
            {
                ObjectMeta: metav1.ObjectMeta{Name: "node2", ResourceVersion: "11"},
                Status: v1.NodeStatus{
                    Allocatable: v1.ResourceList{
                        v1.ResourceCPU:     *resource.NewMilliQuantity(50, resource.DecimalSI),
                        v1.ResourceMemory:  *resource.NewQuantity(60*(1024*1024), resource.DecimalSI),
                        v1.ResourceStorage: *resource.NewQuantity(70*(1024*1024), resource.DecimalSI),
                    },
                },
            },
        },
    }
    return metrics, nodes
}

func testNodeV1beta1MetricsData() (*metricsv1beta1api.NodeMetricsList, *v1.NodeList) {
    metrics := &metricsv1beta1api.NodeMetricsList{
        ListMeta: metav1.ListMeta{
            ResourceVersion: "1",
        },
        Items: []metricsv1beta1api.NodeMetrics{
            {
                ObjectMeta: metav1.ObjectMeta{Name: "node1", ResourceVersion: "10", Labels: map[string]string{"key": "value"}},
                Window:     metav1.Duration{Duration: time.Minute},
                Usage: v1.ResourceList{
                    v1.ResourceCPU:     *resource.NewMilliQuantity(1, resource.DecimalSI),
                    v1.ResourceMemory:  *resource.NewQuantity(2*(1024*1024), resource.DecimalSI),
                    v1.ResourceStorage: *resource.NewQuantity(3*(1024*1024), resource.DecimalSI),
                },
            },
            {
                ObjectMeta: metav1.ObjectMeta{Name: "node2", ResourceVersion: "11"},
                Window:     metav1.Duration{Duration: time.Minute},
                Usage: v1.ResourceList{
                    v1.ResourceCPU:     *resource.NewMilliQuantity(5, resource.DecimalSI),
                    v1.ResourceMemory:  *resource.NewQuantity(6*(1024*1024), resource.DecimalSI),
                    v1.ResourceStorage: *resource.NewQuantity(7*(1024*1024), resource.DecimalSI),
                },
            },
        },
    }
    nodes := &v1.NodeList{
        ListMeta: metav1.ListMeta{
            ResourceVersion: "15",
        },
        Items: []v1.Node{
            {
                ObjectMeta: metav1.ObjectMeta{Name: "node1", ResourceVersion: "10"},
                Status: v1.NodeStatus{
                    Allocatable: v1.ResourceList{
                        v1.ResourceCPU:     *resource.NewMilliQuantity(10, resource.DecimalSI),
                        v1.ResourceMemory:  *resource.NewQuantity(20*(1024*1024), resource.DecimalSI),
                        v1.ResourceStorage: *resource.NewQuantity(30*(1024*1024), resource.DecimalSI),
                    },
                },
            },
            {
                ObjectMeta: metav1.ObjectMeta{Name: "node2", ResourceVersion: "11"},
                Status: v1.NodeStatus{
                    Allocatable: v1.ResourceList{
                        v1.ResourceCPU:     *resource.NewMilliQuantity(50, resource.DecimalSI),
                        v1.ResourceMemory:  *resource.NewQuantity(60*(1024*1024), resource.DecimalSI),
                        v1.ResourceStorage: *resource.NewQuantity(70*(1024*1024), resource.DecimalSI),
                    },
                },
            },
        },
    }
    return metrics, nodes
}