vendor updates

Serguei Bezverkhi
2018-03-06 17:33:18 -05:00
parent 4b3ebc171b
commit e9033989a0
5854 changed files with 248382 additions and 119809 deletions

View File

@@ -57,7 +57,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
// Check for the existence of the Kibana service.
ginkgo.By("Checking the Kibana service exists.")
s := f.ClientSet.Core().Services(metav1.NamespaceSystem)
s := f.ClientSet.CoreV1().Services(metav1.NamespaceSystem)
// Make a few attempts to connect. This makes the test robust against
// being run as the first e2e test just after the e2e cluster has been created.
err := wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
@@ -73,7 +73,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
ginkgo.By("Checking to make sure the Kibana pods are running")
label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue}))
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := f.ClientSet.Core().Pods(metav1.NamespaceSystem).List(options)
pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, pod := range pods.Items {
err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod)
@@ -82,7 +82,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
ginkgo.By("Checking to make sure we get a response from the Kibana UI.")
err = wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
req, err := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
req, err := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
if err != nil {
framework.Logf("Failed to get services proxy request: %v", err)
return false, nil
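
For context, this first hunk shows the mechanical change that recurs throughout this commit: the deprecated Core() group accessor on the client-go clientset becomes the versioned CoreV1() accessor. Both reach the same core/v1 endpoints, so only the accessor changes at each call site. A minimal sketch of the updated form, reusing names from the test above (the "kibana-logging" service name is an assumption, and the context-free Get signature matches the client-go vintage vendored here):

// Sketch of the versioned accessor pattern; not an exact excerpt.
svc, err := f.ClientSet.CoreV1().Services(metav1.NamespaceSystem).Get("kibana-logging", metav1.GetOptions{})
if err != nil {
	framework.Logf("Kibana service not available yet: %v", err)
}
_ = svc // the test would go on to proxy requests to this service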

View File

@@ -55,7 +55,7 @@ func (p *esLogProvider) Init() error {
f := p.Framework
// Check for the existence of the Elasticsearch service.
framework.Logf("Checking the Elasticsearch service exists.")
s := f.ClientSet.Core().Services(api.NamespaceSystem)
s := f.ClientSet.CoreV1().Services(api.NamespaceSystem)
// Make a few attempts to connect. This makes the test robust against
// being run as the first e2e test just after the e2e cluster has been created.
var err error
@@ -73,7 +73,7 @@ func (p *esLogProvider) Init() error {
framework.Logf("Checking to make sure the Elasticsearch pods are running")
labelSelector := fields.SelectorFromSet(fields.Set(map[string]string{"k8s-app": "elasticsearch-logging"})).String()
options := meta_v1.ListOptions{LabelSelector: labelSelector}
pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
pods, err := f.ClientSet.CoreV1().Pods(api.NamespaceSystem).List(options)
if err != nil {
return err
}
@@ -90,7 +90,7 @@ func (p *esLogProvider) Init() error {
err = nil
var body []byte
for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
if errProxy != nil {
framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
continue
@@ -124,7 +124,7 @@ func (p *esLogProvider) Init() error {
framework.Logf("Checking health of Elasticsearch service.")
healthy := false
for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
if errProxy != nil {
framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
continue
@@ -172,7 +172,7 @@ func (p *esLogProvider) Cleanup() {
func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
f := p.Framework
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
if errProxy != nil {
framework.Logf("Failed to get services proxy request: %v", errProxy)
return nil

View File

@@ -119,7 +119,7 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname
framework.PodStateVerification{
Selectors: podlables,
ValidPhases: []v1.PodPhase{v1.PodRunning, v1.PodSucceeded},
// we don't validate total log data, since there is no gaurantee all logs will be stored forever.
// we don't validate total log data, since there is no guarantee all logs will be stored forever.
// instead, we just validate that some logs are being created in std out.
Verify: func(p v1.Pod) (bool, error) {
s, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second)

View File

@@ -18,7 +18,6 @@ package stackdriver
import (
"fmt"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/wait"
@@ -107,19 +106,14 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
err = utils.WaitForLogs(c, ingestionInterval, ingestionTimeout)
framework.ExpectNoError(err)
})
})
})
ginkgo.It("should ingest logs [Feature:StackdriverLogging]", func() {
withLogProviderForScope(f, podsScope, func(p *sdLogProvider) {
ginkgo.By("Checking that too long lines are trimmed", func() {
originalLength := 100001
maxLength := 100 * 1024
cmd := []string{
"/bin/sh",
"-c",
fmt.Sprintf("while :; do printf '%%*s' %d | tr ' ' 'A'; echo; sleep 60; done", originalLength),
fmt.Sprintf("while :; do printf '%%*s' %d | tr ' ' 'A'; echo; sleep 60; done", maxLength+1),
}
trimPrefix := "[Trimmed]"
pod, err := utils.StartAndReturnSelf(utils.NewExecLoggingPod("synthlogger-4", cmd), f)
framework.ExpectNoError(err, "Failed to start a pod")
@@ -133,11 +127,8 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
if log.JSONPayload != nil {
return false, fmt.Errorf("got json log entry %v, wanted plain text", log.JSONPayload)
}
if len(log.TextPayload) == originalLength {
return false, fmt.Errorf("got non-trimmed entry of length %d", len(log.TextPayload))
}
if !strings.HasPrefix(log.TextPayload, trimPrefix) {
return false, fmt.Errorf("got message without prefix '%s': %s", trimPrefix, log.TextPayload)
if len(log.TextPayload) > maxLength {
return false, fmt.Errorf("got too long entry of length %d", len(log.TextPayload))
}
return true, nil
}, utils.JustTimeout, pod.Name())
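
The rewritten assertion leans on simple arithmetic: maxLength is 100 * 1024 = 102400 bytes and the pod prints maxLength+1 characters per line, so every line must be trimmed before ingestion and the check only needs an upper bound. Reduced to its core, as a sketch:

// Any ingested payload longer than the 100 KiB cap proves trimming failed.
if len(log.TextPayload) > maxLength { // maxLength == 100 * 1024
	return false, fmt.Errorf("got too long entry of length %d", len(log.TextPayload))
}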
@@ -187,9 +178,9 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
framework.ExpectNoError(err)
})
ginkgo.By("Waiting for some docker logs to be ingested from each node", func() {
ginkgo.By("Waiting for some container runtime logs to be ingested from each node", func() {
nodeIds := utils.GetNodeIds(f.ClientSet)
log := fmt.Sprintf("projects/%s/logs/docker", framework.TestContext.CloudConfig.ProjectID)
log := fmt.Sprintf("projects/%s/logs/container-runtime", framework.TestContext.CloudConfig.ProjectID)
c := utils.NewLogChecker(p, utils.UntilFirstEntryFromLog(log), utils.JustTimeout, nodeIds...)
err := utils.WaitForLogs(c, ingestionInterval, ingestionTimeout)
framework.ExpectNoError(err)

View File

@@ -20,6 +20,7 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
"sync"
"time"
"k8s.io/apimachinery/pkg/util/wait"
@@ -45,6 +46,9 @@ const (
// PubSub topic with log entries polling interval
sdLoggingPollInterval = 100 * time.Millisecond
// The parallelism level of the log polling process.
sdLoggingPollParallelism = 10
)
type logProviderScope int
@@ -68,6 +72,7 @@ type sdLogProvider struct {
logSink *sd.LogSink
pollingStopChannel chan struct{}
pollingWG *sync.WaitGroup
queueCollection utils.LogsQueueCollection
@@ -92,7 +97,8 @@ func newSdLogProvider(f *framework.Framework, scope logProviderScope) (*sdLogPro
sdService: sdService,
pubsubService: pubsubService,
framework: f,
pollingStopChannel: make(chan struct{}, 1),
pollingStopChannel: make(chan struct{}),
pollingWG: &sync.WaitGroup{},
queueCollection: utils.NewLogsQueueCollection(maxQueueSize),
}
return provider, nil
@@ -128,13 +134,14 @@ func (p *sdLogProvider) Init() error {
return fmt.Errorf("failed to wait for sink to become operational: %v", err)
}
go p.pollLogs()
p.startPollingLogs()
return nil
}
func (p *sdLogProvider) Cleanup() {
p.pollingStopChannel <- struct{}{}
close(p.pollingStopChannel)
p.pollingWG.Wait()
if p.logSink != nil {
projectID := framework.TestContext.CloudConfig.ProjectID
@@ -257,44 +264,54 @@ func (p *sdLogProvider) waitSinkInit() error {
})
}
func (p *sdLogProvider) pollLogs() {
wait.PollUntil(sdLoggingPollInterval, func() (bool, error) {
messages, err := pullAndAck(p.pubsubService, p.subscription)
func (p *sdLogProvider) startPollingLogs() {
for i := 0; i < sdLoggingPollParallelism; i++ {
p.pollingWG.Add(1)
go func() {
defer p.pollingWG.Done()
wait.PollUntil(sdLoggingPollInterval, func() (bool, error) {
p.pollLogsOnce()
return false, nil
}, p.pollingStopChannel)
}()
}
}
func (p *sdLogProvider) pollLogsOnce() {
messages, err := pullAndAck(p.pubsubService, p.subscription)
if err != nil {
framework.Logf("Failed to pull messages from PubSub due to %v", err)
return
}
for _, msg := range messages {
logEntryEncoded, err := base64.StdEncoding.DecodeString(msg.Message.Data)
if err != nil {
framework.Logf("Failed to pull messages from PubSub due to %v", err)
return false, nil
framework.Logf("Got a message from pubsub that is not base64-encoded: %s", msg.Message.Data)
continue
}
for _, msg := range messages {
logEntryEncoded, err := base64.StdEncoding.DecodeString(msg.Message.Data)
if err != nil {
framework.Logf("Got a message from pubsub that is not base64-encoded: %s", msg.Message.Data)
continue
}
var sdLogEntry sd.LogEntry
if err := json.Unmarshal(logEntryEncoded, &sdLogEntry); err != nil {
framework.Logf("Failed to decode a pubsub message '%s': %v", logEntryEncoded, err)
continue
}
name, ok := p.tryGetName(sdLogEntry)
if !ok {
framework.Logf("Received LogEntry with unexpected resource type: %s", sdLogEntry.Resource.Type)
continue
}
logEntry, err := convertLogEntry(sdLogEntry)
if err != nil {
framework.Logf("Failed to parse Stackdriver LogEntry: %v", err)
continue
}
p.queueCollection.Push(name, logEntry)
var sdLogEntry sd.LogEntry
if err := json.Unmarshal(logEntryEncoded, &sdLogEntry); err != nil {
framework.Logf("Failed to decode a pubsub message '%s': %v", logEntryEncoded, err)
continue
}
return false, nil
}, p.pollingStopChannel)
name, ok := p.tryGetName(sdLogEntry)
if !ok {
framework.Logf("Received LogEntry with unexpected resource type: %s", sdLogEntry.Resource.Type)
continue
}
logEntry, err := convertLogEntry(sdLogEntry)
if err != nil {
framework.Logf("Failed to parse Stackdriver LogEntry: %v", err)
continue
}
p.queueCollection.Push(name, logEntry)
}
}
func (p *sdLogProvider) tryGetName(sdLogEntry sd.LogEntry) (string, bool) {
@@ -311,6 +328,8 @@ func (p *sdLogProvider) tryGetName(sdLogEntry sd.LogEntry) (string, bool) {
func convertLogEntry(sdLogEntry sd.LogEntry) (entry utils.LogEntry, err error) {
entry = utils.LogEntry{LogName: sdLogEntry.LogName}
entry.Location = sdLogEntry.Resource.Labels["location"]
if sdLogEntry.TextPayload != "" {
entry.TextPayload = sdLogEntry.TextPayload
return
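
The polling refactor in this file replaces a single goroutine with sdLoggingPollParallelism workers: Cleanup now closes pollingStopChannel so every worker observes the stop (a single send would wake only one), and the WaitGroup lets Cleanup block until all workers have exited. The same pattern as a standalone sketch, assuming only the standard library; parallelism, pollInterval, and pollOnce stand in for the provider's fields and its pull-and-process step:

stop := make(chan struct{})
var wg sync.WaitGroup
for i := 0; i < parallelism; i++ {
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-stop: // a closed channel is observed by every worker
				return
			case <-time.After(pollInterval):
				pollOnce()
			}
		}
	}()
}
// On cleanup:
close(stop) // broadcast stop to all workers at once
wg.Wait()   // block until each worker has returned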

View File

@@ -19,6 +19,7 @@ go_library(
deps = [
"//pkg/apis/core:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@@ -21,10 +21,12 @@ import (
"time"
"fmt"
api_v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
@@ -101,7 +103,7 @@ func (p *loadLoggingPod) Start(f *framework.Framework) error {
Containers: []api_v1.Container{
{
Name: loggingContainerName,
Image: "gcr.io/google_containers/logs-generator:v0.1.0",
Image: imageutils.GetE2EImage(imageutils.LogsGenerator),
Env: []api_v1.EnvVar{
{
Name: "LOGS_GENERATOR_LINES_TOTAL",

View File

@@ -32,6 +32,7 @@ var (
type LogEntry struct {
LogName string
TextPayload string
Location string
JSONPayload map[string]interface{}
}

View File

@@ -48,6 +48,9 @@ func UntilFirstEntryFromLog(log string) IngestionPred {
return func(_ string, entries []LogEntry) (bool, error) {
for _, e := range entries {
if e.LogName == log {
if e.Location != framework.TestContext.CloudConfig.Zone {
return false, fmt.Errorf("Bad location in logs '%s' != '%d'", e.Location, framework.TestContext.CloudConfig.Zone)
}
return true, nil
}
}
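
Given the IngestionPred shape visible in the context lines, func(string, []LogEntry) (bool, error), custom predicates follow the same mold. For example, a hypothetical predicate (not part of this commit) that waits for the first JSON-payload entry:

// Hypothetical predicate in the same shape as UntilFirstEntryFromLog.
func UntilFirstJSONEntry() IngestionPred {
	return func(_ string, entries []LogEntry) (bool, error) {
		for _, e := range entries {
			if e.JSONPayload != nil {
				return true, nil
			}
		}
		return false, nil
	}
}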

View File

@@ -8,12 +8,14 @@ load(
go_library(
name = "go_default_library",
srcs = [
"accelerator.go",
"cadvisor.go",
"custom_metrics_deployments.go",
"custom_metrics_stackdriver.go",
"influxdb.go",
"metrics_grabber.go",
"stackdriver.go",
"stackdriver_metadata_agent.go",
],
importpath = "k8s.io/kubernetes/test/e2e/instrumentation/monitoring",
deps = [
@@ -21,6 +23,8 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/influxdata/influxdb/client/v2:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
@@ -29,6 +33,7 @@ go_library(
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
@@ -36,7 +41,6 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/custom_metrics:go_default_library",
],
)

View File

@@ -0,0 +1,134 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package monitoring
import (
"context"
"os"
"time"
. "github.com/onsi/ginkgo"
"golang.org/x/oauth2/google"
gcm "google.golang.org/api/monitoring/v3"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
"k8s.io/kubernetes/test/e2e/scheduling"
"k8s.io/kubernetes/test/utils/image"
)
// Stackdriver container accelerator metrics, as described here:
// https://cloud.google.com/monitoring/api/metrics_gcp#gcp-container
var acceleratorMetrics = []string{
"accelerator/duty_cycle",
"accelerator/memory_total",
"accelerator/memory_used",
}
var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
})
f := framework.NewDefaultFramework("stackdriver-monitoring")
It("should have accelerator metrics [Feature:StackdriverAcceleratorMonitoring]", func() {
testStackdriverAcceleratorMonitoring(f)
})
})
func testStackdriverAcceleratorMonitoring(f *framework.Framework) {
projectId := framework.TestContext.CloudConfig.ProjectID
ctx := context.Background()
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
framework.ExpectNoError(err)
gcmService, err := gcm.New(client)
framework.ExpectNoError(err)
// set this env var if accessing Stackdriver test endpoint (default is prod):
// $ export STACKDRIVER_API_ENDPOINT_OVERRIDE=https://test-monitoring.sandbox.googleapis.com/
basePathOverride := os.Getenv("STACKDRIVER_API_ENDPOINT_OVERRIDE")
if basePathOverride != "" {
gcmService.BasePath = basePathOverride
}
scheduling.SetupNVIDIAGPUNode(f, false)
// TODO: remove this after cAdvisor race is fixed.
time.Sleep(time.Minute)
f.PodClient().Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: rcName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Name: rcName,
Image: image.GetE2EImage(image.CudaVectorAdd),
Command: []string{"/bin/sh", "-c"},
Args: []string{"nvidia-smi && sleep infinity"},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
framework.NVIDIAGPUResourceName: *resource.NewQuantity(1, resource.DecimalSI),
},
},
},
},
},
})
metricsMap := map[string]bool{}
pollingFunction := checkForAcceleratorMetrics(projectId, gcmService, time.Now(), metricsMap)
err = wait.Poll(pollFrequency, pollTimeout, pollingFunction)
if err != nil {
framework.Logf("Missing metrics: %+v", metricsMap)
}
framework.ExpectNoError(err)
}
func checkForAcceleratorMetrics(projectId string, gcmService *gcm.Service, start time.Time, metricsMap map[string]bool) func() (bool, error) {
return func() (bool, error) {
counter := 0
for _, metric := range acceleratorMetrics {
metricsMap[metric] = false
}
for _, metric := range acceleratorMetrics {
// TODO: check only for metrics from this cluster
ts, err := fetchTimeSeries(projectId, gcmService, metric, start, time.Now())
framework.ExpectNoError(err)
if len(ts) > 0 {
counter = counter + 1
metricsMap[metric] = true
framework.Logf("Received %v timeseries for metric %v", len(ts), metric)
} else {
framework.Logf("No timeseries for metric %v", metric)
}
}
if counter < 3 {
return false, nil
}
return true, nil
}
}
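
fetchTimeSeries is called above but defined elsewhere in the package. A plausible sketch against the google.golang.org/api/monitoring/v3 surface follows; the container.googleapis.com metric prefix and the exact filter string are assumptions, not taken from this commit:

// Assumed shape of fetchTimeSeries; the metric-type prefix is a guess.
func fetchTimeSeries(projectId string, gcmService *gcm.Service, metric string, start, end time.Time) ([]*gcm.TimeSeries, error) {
	resp, err := gcmService.Projects.TimeSeries.
		List("projects/"+projectId).
		Filter(fmt.Sprintf("metric.type = \"container.googleapis.com/container/%s\"", metric)).
		IntervalStartTime(start.Format(time.RFC3339)).
		IntervalEndTime(end.Format(time.RFC3339)).
		Do()
	if err != nil {
		return nil, err
	}
	return resp.TimeSeries, nil
}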

View File

@@ -68,7 +68,7 @@ func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration)
for _, node := range nodeList.Items {
// cadvisor is not accessible directly unless its port (4194 by default) is exposed.
// Here, we access '/stats/' REST endpoint on the kubelet which polls cadvisor internally.
statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
statsResource := fmt.Sprintf("api/v1/nodes/%s/proxy/stats/", node.Name)
By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource))
_, err = c.CoreV1().RESTClient().Get().AbsPath(statsResource).Timeout(timeout).Do().Raw()
if err != nil {
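
The one-line fix swaps the long-deprecated api/v1/proxy/nodes/<node>/... path for the api/v1/nodes/<node>/proxy/... form; both route through the apiserver to the kubelet, but the older ordering was deprecated and later removed from the apiserver. Side by side, as a sketch using the names above:

// Old (deprecated) vs. new apiserver proxy path to the kubelet stats API.
oldPath := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name) // pre-fix form
newPath := fmt.Sprintf("api/v1/nodes/%s/proxy/stats/", node.Name) // form used above
_, err = c.CoreV1().RESTClient().Get().AbsPath(newPath).Timeout(timeout).Do().Raw()
_ = oldPath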

View File

@@ -18,6 +18,7 @@ package monitoring
import (
"fmt"
gcm "google.golang.org/api/monitoring/v3"
corev1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
@@ -27,10 +28,11 @@ import (
)
var (
CustomMetricName = "foo-metric"
UnusedMetricName = "unused-metric"
CustomMetricValue = int64(448)
UnusedMetricValue = int64(446)
CustomMetricName = "foo"
UnusedMetricName = "unused"
CustomMetricValue = int64(448)
UnusedMetricValue = int64(446)
StackdriverExporter = "stackdriver-exporter"
// HPAPermissions is a ClusterRoleBinding that grants unauthenticated user permissions granted for
// HPA for testing purposes, i.e. it should grant permission to read custom metrics.
HPAPermissions = &rbac.ClusterRoleBinding{
@@ -52,9 +54,37 @@ var (
}
)
// StackdriverExporterDeployment is a Deployment of simple application that exports a metric of
// CustomMetricContainerSpec allows specifying a config for StackdriverExporterDeployment
// with multiple containers exporting different metrics.
type CustomMetricContainerSpec struct {
Name string
MetricName string
MetricValue int64
}
// SimpleStackdriverExporterDeployment is a Deployment of a simple application that exports a metric of
// fixed value to Stackdriver in a loop.
func StackdriverExporterDeployment(name, namespace string, replicas int32, metricValue int64) *extensions.Deployment {
func SimpleStackdriverExporterDeployment(name, namespace string, replicas int32, metricValue int64) *extensions.Deployment {
return StackdriverExporterDeployment(name, namespace, replicas,
[]CustomMetricContainerSpec{
{
Name: StackdriverExporter,
MetricName: CustomMetricName,
MetricValue: metricValue,
},
})
}
// StackdriverExporterDeployment is a Deployment of an application that can expose
// an arbitrary number of metrics of fixed value to Stackdriver in a loop. Each metric
// is exposed by a different container in one pod.
// The metric names and values are configured via the containers parameter.
func StackdriverExporterDeployment(name, namespace string, replicas int32, containers []CustomMetricContainerSpec) *extensions.Deployment {
podSpec := corev1.PodSpec{Containers: []corev1.Container{}}
for _, containerSpec := range containers {
podSpec.Containers = append(podSpec.Containers, stackdriverExporterContainerSpec(containerSpec.Name, containerSpec.MetricName, containerSpec.MetricValue))
}
return &extensions.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@@ -70,7 +100,7 @@ func StackdriverExporterDeployment(name, namespace string, replicas int32, metri
"name": name,
},
},
Spec: stackdriverExporterPodSpec(CustomMetricName, metricValue),
Spec: podSpec,
},
Replicas: &replicas,
},
@@ -88,18 +118,75 @@ func StackdriverExporterPod(podName, namespace, podLabel, metricName string, met
"name": podLabel,
},
},
Spec: stackdriverExporterPodSpec(metricName, metricValue),
Spec: corev1.PodSpec{
Containers: []corev1.Container{stackdriverExporterContainerSpec(StackdriverExporter, metricName, metricValue)},
},
}
}
func stackdriverExporterPodSpec(metricName string, metricValue int64) corev1.PodSpec {
func stackdriverExporterContainerSpec(name string, metricName string, metricValue int64) corev1.Container {
return corev1.Container{
Name: name,
Image: "k8s.gcr.io/sd-dummy-exporter:v0.1.0",
ImagePullPolicy: corev1.PullPolicy("Always"),
Command: []string{"/sd_dummy_exporter", "--pod-id=$(POD_ID)", "--metric-name=" + metricName, fmt.Sprintf("--metric-value=%v", metricValue)},
Env: []corev1.EnvVar{
{
Name: "POD_ID",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.uid",
},
},
},
},
Ports: []corev1.ContainerPort{{ContainerPort: 80}},
}
}
// PrometheusExporterDeployment is a Deployment of a simple application with two containers:
// one exposing a metric in prometheus format and a second prometheus-to-sd container
// that scrapes the metric and pushes it to stackdriver.
func PrometheusExporterDeployment(name, namespace string, replicas int32, metricValue int64) *extensions.Deployment {
return &extensions.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: extensions.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"name": name},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"name": name,
},
},
Spec: prometheusExporterPodSpec(CustomMetricName, metricValue, 8080),
},
Replicas: &replicas,
},
}
}
func prometheusExporterPodSpec(metricName string, metricValue int64, port int32) corev1.PodSpec {
return corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "stackdriver-exporter",
Image: "gcr.io/google-containers/sd-dummy-exporter:v0.1.0",
Name: "prometheus-exporter",
Image: "k8s.gcr.io/prometheus-dummy-exporter:v0.1.0",
ImagePullPolicy: corev1.PullPolicy("Always"),
Command: []string{"/sd_dummy_exporter", "--pod-id=$(POD_ID)", "--metric-name=" + metricName, fmt.Sprintf("--metric-value=%v", metricValue)},
Command: []string{"/prometheus_dummy_exporter", "--metric-name=" + metricName,
fmt.Sprintf("--metric-value=%v", metricValue), fmt.Sprintf("=--port=%d", port)},
Ports: []corev1.ContainerPort{{ContainerPort: port}},
},
{
Name: "prometheus-to-sd",
Image: "k8s.gcr.io/prometheus-to-sd:v0.2.3",
ImagePullPolicy: corev1.PullPolicy("Always"),
Command: []string{"/monitor", fmt.Sprintf("--source=:http://localhost:%d", port),
"--stackdriver-prefix=custom.googleapis.com", "--pod-id=$(POD_ID)", "--namespace-id=$(POD_NAMESPACE)"},
Env: []corev1.EnvVar{
{
Name: "POD_ID",
@@ -109,8 +196,15 @@ func stackdriverExporterPodSpec(metricName string, metricValue int64) corev1.Pod
},
},
},
{
Name: "POD_NAMESPACE",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
},
Ports: []corev1.ContainerPort{{ContainerPort: 80}},
},
},
}
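
With this refactor, each CustomMetricContainerSpec becomes one exporter container in the pod, so a single Deployment can export several metrics at once. A usage sketch with partly illustrative values (CustomMetricName and CustomMetricValue appear in this diff; the "bar" metric and its value are made up):

// Sketch: two exporter containers in one Deployment via the new API.
d := StackdriverExporterDeployment("sd-exporter", f.Namespace.Name, 1,
	[]CustomMetricContainerSpec{
		{Name: "exporter-foo", MetricName: CustomMetricName, MetricValue: CustomMetricValue},
		{Name: "exporter-bar", MetricName: "bar", MetricValue: 42}, // hypothetical second metric
	})
_, err := f.ClientSet.ExtensionsV1beta1().Deployments(f.Namespace.Name).Create(d)
framework.ExpectNoError(err)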

View File

@@ -32,7 +32,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/client-go/discovery"
kubeaggrcs "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"
customclient "k8s.io/metrics/pkg/client/custom_metrics"
)
@@ -50,13 +49,11 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
f := framework.NewDefaultFramework("stackdriver-monitoring")
var kubeClient clientset.Interface
var kubeAggrClient kubeaggrcs.Interface
var customMetricsClient customclient.CustomMetricsClient
var discoveryClient *discovery.DiscoveryClient
It("should run Custom Metrics - Stackdriver Adapter [Feature:StackdriverCustomMetrics]", func() {
kubeClient = f.ClientSet
kubeAggrClient = f.AggregatorClient
config, err := framework.LoadConfig()
if err != nil {
framework.Failf("Failed to load config: %s", err)
@@ -104,8 +101,8 @@ func testAdapter(f *framework.Framework, kubeClient clientset.Interface, customM
}
defer CleanupAdapter()
_, err = kubeClient.Rbac().ClusterRoleBindings().Create(HPAPermissions)
defer kubeClient.Rbac().ClusterRoleBindings().Delete("custom-metrics-reader", &metav1.DeleteOptions{})
_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions)
defer kubeClient.RbacV1().ClusterRoleBindings().Delete("custom-metrics-reader", &metav1.DeleteOptions{})
// Run application that exports the metric
err = createSDExporterPods(f, kubeClient)

View File

@@ -122,7 +122,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string,
if err != nil {
return nil, err
}
psList, err := c.AppsV1beta1().StatefulSets(metav1.NamespaceSystem).List(options)
psList, err := c.AppsV1().StatefulSets(metav1.NamespaceSystem).List(options)
if err != nil {
return nil, err
}

View File

@@ -0,0 +1,169 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package monitoring
import (
"time"
"golang.org/x/oauth2/google"
clientset "k8s.io/client-go/kubernetes"
"context"
"encoding/json"
"fmt"
. "github.com/onsi/ginkgo"
"io/ioutil"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
"reflect"
)
const (
// Time to wait after pod creation for its metadata to be exported
metadataWaitTime = 120 * time.Second
// Scope for Stackdriver Metadata API
MonitoringScope = "https://www.googleapis.com/auth/monitoring"
)
var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
})
f := framework.NewDefaultFramework("stackdriver-monitoring")
var kubeClient clientset.Interface
It("should run Stackdriver Metadata Agent [Feature:StackdriverMetadataAgent]", func() {
kubeClient = f.ClientSet
testAgent(f, kubeClient)
})
})
func testAgent(f *framework.Framework, kubeClient clientset.Interface) {
projectId := framework.TestContext.CloudConfig.ProjectID
resourceType := "k8s_container"
uniqueContainerName := fmt.Sprintf("test-container-%v", time.Now().Unix())
endpoint := fmt.Sprintf(
"https://stackdriver.googleapis.com/v1beta2/projects/%v/resourceMetadata?filter=resource.type%%3D%v+AND+resource.label.container_name%%3D%v",
projectId,
resourceType,
uniqueContainerName)
oauthClient, err := google.DefaultClient(context.Background(), MonitoringScope)
if err != nil {
framework.Failf("Failed to create oauth client: %s", err)
}
// Create test pod with unique name.
framework.CreateExecPodOrFail(kubeClient, f.Namespace.Name, uniqueContainerName, func(pod *v1.Pod) {
pod.Spec.Containers[0].Name = uniqueContainerName
})
defer kubeClient.CoreV1().Pods(f.Namespace.Name).Delete(uniqueContainerName, &metav1.DeleteOptions{})
// Wait a short amount of time for Metadata Agent to be created and metadata to be exported
time.Sleep(metadataWaitTime)
resp, err := oauthClient.Get(endpoint)
if err != nil {
framework.Failf("Failed to call Stackdriver Metadata API %s", err)
}
if resp.StatusCode != 200 {
framework.Failf("Stackdriver Metadata API returned error status: %s", resp.Status)
}
metadataAPIResponse, err := ioutil.ReadAll(resp.Body)
if err != nil {
framework.Failf("Failed to read response from Stackdriver Metadata API: %s", err)
}
exists, err := verifyPodExists(metadataAPIResponse, uniqueContainerName)
if err != nil {
framework.Failf("Failed to process response from Stackdriver Metadata API: %s", err)
}
if !exists {
framework.Failf("Missing Metadata for container %q", uniqueContainerName)
}
}
type Metadata struct {
Results []map[string]interface{}
}
type Resource struct {
resourceType string
resourceLabels map[string]string
}
func verifyPodExists(response []byte, containerName string) (bool, error) {
var metadata Metadata
err := json.Unmarshal(response, &metadata)
if err != nil {
return false, fmt.Errorf("Failed to unmarshall: %s", err)
}
for _, result := range metadata.Results {
rawResource, ok := result["resource"]
if !ok {
return false, fmt.Errorf("No resource entry in response from Stackdriver Metadata API")
}
resource, err := parseResource(rawResource)
if err != nil {
return false, fmt.Errorf("No 'resource' label: %s", err)
}
if resource.resourceType == "k8s_container" &&
resource.resourceLabels["container_name"] == containerName {
return true, nil
}
}
return false, nil
}
func parseResource(resource interface{}) (*Resource, error) {
var labels map[string]string = map[string]string{}
resourceMap, ok := resource.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("Resource entry is of type %s, expected map[string]interface{}", reflect.TypeOf(resource))
}
resourceType, ok := resourceMap["type"]
if !ok {
return nil, fmt.Errorf("Resource entry doesn't have a type specified")
}
resourceTypeName, ok := resourceType.(string)
if !ok {
return nil, fmt.Errorf("Resource type is of type %s, expected string", reflect.TypeOf(resourceType))
}
resourceLabels, ok := resourceMap["labels"]
if !ok {
return nil, fmt.Errorf("Resource entry doesn't have any labels specified")
}
resourceLabelMap, ok := resourceLabels.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("Resource labels entry is of type %s, expected map[string]interface{}", reflect.TypeOf(resourceLabels))
}
for label, val := range resourceLabelMap {
labels[label], ok = val.(string)
if !ok {
return nil, fmt.Errorf("Resource label %q is of type %s, expected string", label, reflect.TypeOf(val))
}
}
return &Resource{
resourceType: resourceTypeName,
resourceLabels: labels,
}, nil
}
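
A quick usage sketch for verifyPodExists, fed a hand-written response in the shape the parser expects; the JSON is illustrative, and encoding/json matches the exported Results field case-insensitively, so the lowercase "results" key binds fine:

// Illustrative response containing only the fields parseResource reads.
sample := []byte(`{"results":[{"resource":{"type":"k8s_container","labels":{"container_name":"test-container-1"}}}]}`)
found, err := verifyPodExists(sample, "test-container-1")
if err != nil {
	framework.Failf("Failed to process sample response: %s", err)
}
if !found {
	framework.Failf("Expected container metadata to be present")
}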