vendor updates

Serguei Bezverkhi
2018-03-06 17:33:18 -05:00
parent 4b3ebc171b
commit e9033989a0
5854 changed files with 248382 additions and 119809 deletions

View File

@@ -57,7 +57,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
// Check for the existence of the Kibana service.
ginkgo.By("Checking the Kibana service exists.")
-s := f.ClientSet.Core().Services(metav1.NamespaceSystem)
+s := f.ClientSet.CoreV1().Services(metav1.NamespaceSystem)
// Make a few attempts to connect. This makes the test robust against
// being run as the first e2e test just after the e2e cluster has been created.
err := wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
@@ -73,7 +73,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
ginkgo.By("Checking to make sure the Kibana pods are running")
label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue}))
options := metav1.ListOptions{LabelSelector: label.String()}
-pods, err := f.ClientSet.Core().Pods(metav1.NamespaceSystem).List(options)
+pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, pod := range pods.Items {
err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod)
@@ -82,7 +82,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
ginkgo.By("Checking to make sure we get a response from the Kibana UI.")
err = wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
-req, err := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
+req, err := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
if err != nil {
framework.Logf("Failed to get services proxy request: %v", err)
return false, nil

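The recurring change across these test hunks is the client-go migration from the deprecated unversioned Core() accessor to the versioned CoreV1() group client; the rest of each call chain is unchanged. Below is a minimal standalone sketch of the same call style, assuming the 2018-era client-go vendored here (later releases also require a context.Context argument on List); the kubeconfig path is a placeholder:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from a kubeconfig file (path is a placeholder).
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// CoreV1() is the versioned replacement for the removed Core() accessor.
	pods, err := clientset.CoreV1().Pods(metav1.NamespaceSystem).List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, pod := range pods.Items {
		fmt.Println(pod.Name)
	}
}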
View File

@@ -55,7 +55,7 @@ func (p *esLogProvider) Init() error {
f := p.Framework
// Check for the existence of the Elasticsearch service.
framework.Logf("Checking the Elasticsearch service exists.")
-s := f.ClientSet.Core().Services(api.NamespaceSystem)
+s := f.ClientSet.CoreV1().Services(api.NamespaceSystem)
// Make a few attempts to connect. This makes the test robust against
// being run as the first e2e test just after the e2e cluster has been created.
var err error
@@ -73,7 +73,7 @@ func (p *esLogProvider) Init() error {
framework.Logf("Checking to make sure the Elasticsearch pods are running")
labelSelector := fields.SelectorFromSet(fields.Set(map[string]string{"k8s-app": "elasticsearch-logging"})).String()
options := meta_v1.ListOptions{LabelSelector: labelSelector}
-pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
+pods, err := f.ClientSet.CoreV1().Pods(api.NamespaceSystem).List(options)
if err != nil {
return err
}
@@ -90,7 +90,7 @@ func (p *esLogProvider) Init() error {
err = nil
var body []byte
for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
-proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
+proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
if errProxy != nil {
framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
continue
@@ -124,7 +124,7 @@ func (p *esLogProvider) Init() error {
framework.Logf("Checking health of Elasticsearch service.")
healthy := false
for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
-proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
+proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
if errProxy != nil {
framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
continue
@@ -172,7 +172,7 @@ func (p *esLogProvider) Cleanup() {
func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
f := p.Framework
-proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
+proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
if errProxy != nil {
framework.Logf("Failed to get services proxy request: %v", errProxy)
return nil

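A pattern worth noting in this provider: the retries are driven by the same wall-clock loop, for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay), which keeps attempting until a deadline and sleeps between attempts. A standalone sketch of the idiom follows; the timeout and delay constants are assumptions, and tryOnce is a stand-in for one proxied request:

package main

import (
	"errors"
	"fmt"
	"time"
)

// tryOnce stands in for a single proxied request to the service.
func tryOnce() error { return errors.New("not ready yet") }

func main() {
	const (
		retryTimeout = 30 * time.Second // plays the role of esRetryTimeout
		retryDelay   = 2 * time.Second  // plays the role of esRetryDelay
	)
	// Retry until the deadline passes; the post statement sleeps
	// between attempts, exactly as in the loops above.
	for start := time.Now(); time.Since(start) < retryTimeout; time.Sleep(retryDelay) {
		if err := tryOnce(); err != nil {
			fmt.Printf("after %v: %v\n", time.Since(start), err)
			continue
		}
		fmt.Println("succeeded")
		break
	}
}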
View File

@@ -119,7 +119,7 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname
framework.PodStateVerification{
Selectors: podlables,
ValidPhases: []v1.PodPhase{v1.PodRunning, v1.PodSucceeded},
-// we don't validate total log data, since there is no gaurantee all logs will be stored forever.
+// we don't validate total log data, since there is no guarantee all logs will be stored forever.
// instead, we just validate that some logs are being created in std out.
Verify: func(p v1.Pod) (bool, error) {
s, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second)

View File

@@ -18,7 +18,6 @@ package stackdriver
import (
"fmt"
-"strings"
"time"
"k8s.io/apimachinery/pkg/util/wait"
@@ -107,19 +106,14 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
err = utils.WaitForLogs(c, ingestionInterval, ingestionTimeout)
framework.ExpectNoError(err)
})
-})
-})
-ginkgo.It("should ingest logs [Feature:StackdriverLogging]", func() {
-withLogProviderForScope(f, podsScope, func(p *sdLogProvider) {
ginkgo.By("Checking that too long lines are trimmed", func() {
-originalLength := 100001
+maxLength := 100 * 1024
cmd := []string{
"/bin/sh",
"-c",
-fmt.Sprintf("while :; do printf '%%*s' %d | tr ' ' 'A'; echo; sleep 60; done", originalLength),
+fmt.Sprintf("while :; do printf '%%*s' %d | tr ' ' 'A'; echo; sleep 60; done", maxLength+1),
}
-trimPrefix := "[Trimmed]"
pod, err := utils.StartAndReturnSelf(utils.NewExecLoggingPod("synthlogger-4", cmd), f)
framework.ExpectNoError(err, "Failed to start a pod")
@@ -133,11 +127,8 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
if log.JSONPayload != nil {
return false, fmt.Errorf("got json log entry %v, wanted plain text", log.JSONPayload)
}
-if len(log.TextPayload) == originalLength {
-return false, fmt.Errorf("got non-trimmed entry of length %d", len(log.TextPayload))
-}
-if !strings.HasPrefix(log.TextPayload, trimPrefix) {
-return false, fmt.Errorf("got message without prefix '%s': %s", trimPrefix, log.TextPayload)
+if len(log.TextPayload) > maxLength {
+return false, fmt.Errorf("got too long entry of length %d", len(log.TextPayload))
}
return true, nil
}, utils.JustTimeout, pod.Name())
@@ -187,9 +178,9 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
framework.ExpectNoError(err)
})
-ginkgo.By("Waiting for some docker logs to be ingested from each node", func() {
+ginkgo.By("Waiting for some container runtime logs to be ingested from each node", func() {
nodeIds := utils.GetNodeIds(f.ClientSet)
-log := fmt.Sprintf("projects/%s/logs/docker", framework.TestContext.CloudConfig.ProjectID)
+log := fmt.Sprintf("projects/%s/logs/container-runtime", framework.TestContext.CloudConfig.ProjectID)
c := utils.NewLogChecker(p, utils.UntilFirstEntryFromLog(log), utils.JustTimeout, nodeIds...)
err := utils.WaitForLogs(c, ingestionInterval, ingestionTimeout)
framework.ExpectNoError(err)

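The rewritten trimming check above no longer asserts a "[Trimmed]" prefix; it only bounds the payload: the generator pod emits lines of maxLength+1 characters (100 * 1024 + 1), so any ingested entry longer than maxLength proves the agent failed to trim. A standalone sketch of the new assertion, with the log entry reduced to a plain string:

package main

import (
	"fmt"
	"strings"
)

const maxLength = 100 * 1024 // 100 KiB, as in the updated test

// checkTrimmed mirrors the updated assertion: any entry longer than
// maxLength should have been trimmed before ingestion.
func checkTrimmed(textPayload string) error {
	if len(textPayload) > maxLength {
		return fmt.Errorf("got too long entry of length %d", len(textPayload))
	}
	return nil
}

func main() {
	generated := strings.Repeat("A", maxLength+1) // what the pod writes
	fmt.Println(checkTrimmed(generated))          // error: entry was not trimmed
	fmt.Println(checkTrimmed(generated[:1024]))   // nil: within the bound
}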
View File

@@ -20,6 +20,7 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
+"sync"
"time"
"k8s.io/apimachinery/pkg/util/wait"
@@ -45,6 +46,9 @@ const (
// PubSub topic with log entries polling interval
sdLoggingPollInterval = 100 * time.Millisecond
+// The parallelism level of polling logs process.
+sdLoggingPollParallelism = 10
)
type logProviderScope int
@@ -68,6 +72,7 @@ type sdLogProvider struct {
logSink *sd.LogSink
pollingStopChannel chan struct{}
+pollingWG *sync.WaitGroup
queueCollection utils.LogsQueueCollection
@@ -92,7 +97,8 @@ func newSdLogProvider(f *framework.Framework, scope logProviderScope) (*sdLogPro
sdService: sdService,
pubsubService: pubsubService,
framework: f,
-pollingStopChannel: make(chan struct{}, 1),
+pollingStopChannel: make(chan struct{}),
+pollingWG: &sync.WaitGroup{},
queueCollection: utils.NewLogsQueueCollection(maxQueueSize),
}
return provider, nil
@@ -128,13 +134,14 @@ func (p *sdLogProvider) Init() error {
return fmt.Errorf("failed to wait for sink to become operational: %v", err)
}
-go p.pollLogs()
+p.startPollingLogs()
return nil
}
func (p *sdLogProvider) Cleanup() {
-p.pollingStopChannel <- struct{}{}
+close(p.pollingStopChannel)
+p.pollingWG.Wait()
if p.logSink != nil {
projectID := framework.TestContext.CloudConfig.ProjectID
@@ -257,44 +264,54 @@ func (p *sdLogProvider) waitSinkInit() error {
})
}
-func (p *sdLogProvider) pollLogs() {
-wait.PollUntil(sdLoggingPollInterval, func() (bool, error) {
-messages, err := pullAndAck(p.pubsubService, p.subscription)
-if err != nil {
-framework.Logf("Failed to pull messages from PubSub due to %v", err)
-return false, nil
-}
-for _, msg := range messages {
-logEntryEncoded, err := base64.StdEncoding.DecodeString(msg.Message.Data)
-if err != nil {
-framework.Logf("Got a message from pubsub that is not base64-encoded: %s", msg.Message.Data)
-continue
-}
-var sdLogEntry sd.LogEntry
-if err := json.Unmarshal(logEntryEncoded, &sdLogEntry); err != nil {
-framework.Logf("Failed to decode a pubsub message '%s': %v", logEntryEncoded, err)
-continue
-}
-name, ok := p.tryGetName(sdLogEntry)
-if !ok {
-framework.Logf("Received LogEntry with unexpected resource type: %s", sdLogEntry.Resource.Type)
-continue
-}
-logEntry, err := convertLogEntry(sdLogEntry)
-if err != nil {
-framework.Logf("Failed to parse Stackdriver LogEntry: %v", err)
-continue
-}
-p.queueCollection.Push(name, logEntry)
-}
-return false, nil
-}, p.pollingStopChannel)
-}
+func (p *sdLogProvider) startPollingLogs() {
+for i := 0; i < sdLoggingPollParallelism; i++ {
+p.pollingWG.Add(1)
+go func() {
+defer p.pollingWG.Done()
+wait.PollUntil(sdLoggingPollInterval, func() (bool, error) {
+p.pollLogsOnce()
+return false, nil
+}, p.pollingStopChannel)
+}()
+}
+}
+func (p *sdLogProvider) pollLogsOnce() {
+messages, err := pullAndAck(p.pubsubService, p.subscription)
+if err != nil {
+framework.Logf("Failed to pull messages from PubSub due to %v", err)
+return
+}
+for _, msg := range messages {
+logEntryEncoded, err := base64.StdEncoding.DecodeString(msg.Message.Data)
+if err != nil {
+framework.Logf("Got a message from pubsub that is not base64-encoded: %s", msg.Message.Data)
+continue
+}
+var sdLogEntry sd.LogEntry
+if err := json.Unmarshal(logEntryEncoded, &sdLogEntry); err != nil {
+framework.Logf("Failed to decode a pubsub message '%s': %v", logEntryEncoded, err)
+continue
+}
+name, ok := p.tryGetName(sdLogEntry)
+if !ok {
+framework.Logf("Received LogEntry with unexpected resource type: %s", sdLogEntry.Resource.Type)
+continue
+}
+logEntry, err := convertLogEntry(sdLogEntry)
+if err != nil {
+framework.Logf("Failed to parse Stackdriver LogEntry: %v", err)
+continue
+}
+p.queueCollection.Push(name, logEntry)
+}
+}
func (p *sdLogProvider) tryGetName(sdLogEntry sd.LogEntry) (string, bool) {
@@ -311,6 +328,8 @@ func (p *sdLogProvider) tryGetName(sdLogEntry sd.LogEntry) (string, bool) {
func convertLogEntry(sdLogEntry sd.LogEntry) (entry utils.LogEntry, err error) {
entry = utils.LogEntry{LogName: sdLogEntry.LogName}
+entry.Location = sdLogEntry.Resource.Labels["location"]
if sdLogEntry.TextPayload != "" {
entry.TextPayload = sdLogEntry.TextPayload
return

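Two coordinated changes above make the polling shutdown correct for multiple workers: Cleanup now calls close(p.pollingStopChannel) instead of sending a single value (a close is observed by every receiver, whereas one send would stop only one of the sdLoggingPollParallelism pollers), and the sync.WaitGroup lets Cleanup block until every poller goroutine has actually returned. A self-contained sketch of that pattern, with the poll body simplified to a ticker:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	stop := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ { // 10 plays the role of sdLoggingPollParallelism
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for {
				select {
				case <-stop: // a closed channel unblocks every worker at once
					return
				case <-time.After(100 * time.Millisecond):
					fmt.Printf("worker %d polled\n", id)
				}
			}
		}(i)
	}
	time.Sleep(300 * time.Millisecond)
	close(stop) // broadcast shutdown; a single send would wake only one worker
	wg.Wait()   // mirror pollingWG.Wait(): return only after all workers exit
}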
View File

@@ -19,6 +19,7 @@ go_library(
deps = [
"//pkg/apis/core:go_default_library",
"//test/e2e/framework:go_default_library",
+"//test/utils/image:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@@ -21,10 +21,12 @@ import (
"time"
"fmt"
api_v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
+imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
@@ -101,7 +103,7 @@ func (p *loadLoggingPod) Start(f *framework.Framework) error {
Containers: []api_v1.Container{
{
Name: loggingContainerName,
-Image: "gcr.io/google_containers/logs-generator:v0.1.0",
+Image: imageutils.GetE2EImage(imageutils.LogsGenerator),
Env: []api_v1.EnvVar{
{
Name: "LOGS_GENERATOR_LINES_TOTAL",

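The image swap above replaces a hard-coded gcr.io reference with a lookup through the shared test image registry, so a version bump touches one table instead of every test. A hypothetical, reduced model of that lookup (the real test/utils/image package carries many more entries and fields):

package main

import "fmt"

// imageID and the table below are a hypothetical reduction of the
// test/utils/image registry used by GetE2EImage in the diff above.
type imageID int

const LogsGenerator imageID = iota

var images = map[imageID]string{
	LogsGenerator: "gcr.io/google_containers/logs-generator:v0.1.0",
}

// GetE2EImage resolves an image ID to its full reference.
func GetE2EImage(id imageID) string {
	return images[id]
}

func main() {
	// Callers no longer embed registry paths or tags directly.
	fmt.Println(GetE2EImage(LogsGenerator))
}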
View File

@@ -32,6 +32,7 @@ var (
type LogEntry struct {
LogName string
TextPayload string
+Location string
JSONPayload map[string]interface{}
}

View File

@@ -48,6 +48,9 @@ func UntilFirstEntryFromLog(log string) IngestionPred {
return func(_ string, entries []LogEntry) (bool, error) {
for _, e := range entries {
if e.LogName == log {
+if e.Location != framework.TestContext.CloudConfig.Zone {
+return false, fmt.Errorf("Bad location in logs '%s' != '%s'", e.Location, framework.TestContext.CloudConfig.Zone)
+}
return true, nil
}
}
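
The added check ties ingestion success to the entry's location matching the configured zone, using the new Location field populated from the Stackdriver resource labels. A standalone sketch of the updated predicate, with the framework types reduced to local stand-ins:

package main

import "fmt"

// LogEntry mirrors the struct extended in this diff (other fields omitted).
type LogEntry struct {
	LogName  string
	Location string
}

// IngestionPred matches the predicate shape used by the log checkers here.
type IngestionPred func(name string, entries []LogEntry) (bool, error)

// untilFirstEntryFromLog sketches the updated predicate: succeed on the
// first entry from the given log, but fail fast if its location does not
// match the expected zone.
func untilFirstEntryFromLog(log, zone string) IngestionPred {
	return func(_ string, entries []LogEntry) (bool, error) {
		for _, e := range entries {
			if e.LogName == log {
				if e.Location != zone {
					return false, fmt.Errorf("bad location in logs '%s' != '%s'", e.Location, zone)
				}
				return true, nil
			}
		}
		return false, nil
	}
}

func main() {
	pred := untilFirstEntryFromLog("projects/p/logs/container-runtime", "us-central1-a")
	ok, err := pred("node-1", []LogEntry{{LogName: "projects/p/logs/container-runtime", Location: "us-central1-a"}})
	fmt.Println(ok, err) // true <nil>
}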