vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

83
vendor/k8s.io/kubernetes/test/e2e/network/BUILD generated vendored Normal file

@@ -0,0 +1,83 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"dns.go",
"dns_common.go",
"dns_configmap.go",
"doc.go",
"example_cluster_dns.go",
"firewall.go",
"framework.go",
"ingress.go",
"kube_proxy.go",
"network_policy.go",
"network_tiers.go",
"networking.go",
"networking_perf.go",
"no_snat.go",
"proxy.go",
"service.go",
"service_latency.go",
"serviceloadbalancers.go",
"util_iperf.go",
],
importpath = "k8s.io/kubernetes/test/e2e/network",
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/controller/endpoint:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/master/ports:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/images/net/nat:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/google.golang.org/api/compute/v0.alpha:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/networking/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

4
vendor/k8s.io/kubernetes/test/e2e/network/OWNERS generated vendored Normal file

@@ -0,0 +1,4 @@
reviewers:
- sig-network-reviewers
approvers:
- sig-network-approvers

454
vendor/k8s.io/kubernetes/test/e2e/network/dns.go generated vendored Normal file

@@ -0,0 +1,454 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"context"
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const dnsTestPodHostName = "dns-querier-1"
const dnsTestServiceName = "dns-test-service"
func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd string) *v1.Pod {
dnsPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "dns-test-" + string(uuid.NewUUID()),
Namespace: namespace,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "results",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
Containers: []v1.Container{
// TODO: Consider scraping logs instead of running a webserver.
{
Name: "webserver",
Image: imageutils.GetE2EImage(imageutils.TestWebserver),
Ports: []v1.ContainerPort{
{
Name: "http",
ContainerPort: 80,
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "results",
MountPath: "/results",
},
},
},
{
Name: "querier",
Image: imageutils.GetE2EImage(imageutils.Dnsutils),
Command: []string{"sh", "-c", wheezyProbeCmd},
VolumeMounts: []v1.VolumeMount{
{
Name: "results",
MountPath: "/results",
},
},
},
{
Name: "jessie-querier",
Image: imageutils.GetE2EImage(imageutils.JessieDnsutils),
Command: []string{"sh", "-c", jessieProbeCmd},
VolumeMounts: []v1.VolumeMount{
{
Name: "results",
MountPath: "/results",
},
},
},
},
},
}
dnsPod.Spec.Hostname = dnsTestPodHostName
dnsPod.Spec.Subdomain = dnsTestServiceName
return dnsPod
}
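// For reference: the pod above runs three containers sharing the "results"
// emptyDir volume. The two querier containers (wheezy and jessie dnsutils
// images) run the probe commands and write OK-files into /results, and the
// webserver container serves /results over HTTP so the test can read the
// results back through the pod proxy.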
func createProbeCommand(namesToResolve []string, hostEntries []string, ptrLookupIP string, fileNamePrefix, namespace string) (string, []string) {
fileNames := make([]string, 0, len(namesToResolve)*2)
probeCmd := "for i in `seq 1 600`; do "
for _, name := range namesToResolve {
// Resolve by TCP and UDP DNS. Use $$(...) because $(...) is
// expanded by kubernetes (this particular command substitution would not
// be expanded anyway, but keeping it literal is the safer choice).
lookup := "A"
if strings.HasPrefix(name, "_") {
lookup = "SRV"
}
fileName := fmt.Sprintf("%s_udp@%s", fileNamePrefix, name)
fileNames = append(fileNames, fileName)
probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search %s %s)" && echo OK > /results/%s;`, name, lookup, fileName)
fileName = fmt.Sprintf("%s_tcp@%s", fileNamePrefix, name)
fileNames = append(fileNames, fileName)
probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search %s %s)" && echo OK > /results/%s;`, name, lookup, fileName)
}
for _, name := range hostEntries {
fileName := fmt.Sprintf("%s_hosts@%s", fileNamePrefix, name)
fileNames = append(fileNames, fileName)
probeCmd += fmt.Sprintf(`test -n "$$(getent hosts %s)" && echo OK > /results/%s;`, name, fileName)
}
podARecByUDPFileName := fmt.Sprintf("%s_udp@PodARecord", fileNamePrefix)
podARecByTCPFileName := fmt.Sprintf("%s_tcp@PodARecord", fileNamePrefix)
probeCmd += fmt.Sprintf(`podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".%s.pod.cluster.local"}');`, namespace)
probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search $${podARec} A)" && echo OK > /results/%s;`, podARecByUDPFileName)
probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search $${podARec} A)" && echo OK > /results/%s;`, podARecByTCPFileName)
fileNames = append(fileNames, podARecByUDPFileName)
fileNames = append(fileNames, podARecByTCPFileName)
if len(ptrLookupIP) > 0 {
ptrLookup := fmt.Sprintf("%s.in-addr.arpa.", strings.Join(reverseArray(strings.Split(ptrLookupIP, ".")), "."))
ptrRecByUDPFileName := fmt.Sprintf("%s_udp@PTR", ptrLookupIP)
ptrRecByTCPFileName := fmt.Sprintf("%s_tcp@PTR", ptrLookupIP)
probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search %s PTR)" && echo OK > /results/%s;`, ptrLookup, ptrRecByUDPFileName)
probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search %s PTR)" && echo OK > /results/%s;`, ptrLookup, ptrRecByTCPFileName)
fileNames = append(fileNames, ptrRecByUDPFileName)
fileNames = append(fileNames, ptrRecByTCPFileName)
}
probeCmd += "sleep 1; done"
return probeCmd, fileNames
}
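// Illustrative example with hypothetical inputs: for
// namesToResolve = []string{"kubernetes.default"}, fileNamePrefix = "wheezy"
// and namespace "ns-1", each loop iteration of the returned probeCmd contains
// fragments such as
//   test -n "$$(dig +notcp +noall +answer +search kubernetes.default A)" && echo OK > /results/wheezy_udp@kubernetes.default;
//   test -n "$$(dig +tcp +noall +answer +search kubernetes.default A)" && echo OK > /results/wheezy_tcp@kubernetes.default;
// and the returned fileNames holds the matching "wheezy_udp@..." and
// "wheezy_tcp@..." names plus the two PodARecord entries.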
// createTargetedProbeCommand returns a command line that performs a DNS lookup for a specific record type
func createTargetedProbeCommand(nameToResolve string, lookup string, fileNamePrefix string) (string, string) {
fileName := fmt.Sprintf("%s_udp@%s", fileNamePrefix, nameToResolve)
probeCmd := fmt.Sprintf("dig +short +tries=12 +norecurse %s %s > /results/%s", nameToResolve, lookup, fileName)
return probeCmd, fileName
}
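// Illustrative example with hypothetical inputs: for
// nameToResolve = "foo.ns-1.svc.cluster.local", lookup = "CNAME" and
// fileNamePrefix = "wheezy", this returns
//   probeCmd = dig +short +tries=12 +norecurse foo.ns-1.svc.cluster.local CNAME > /results/wheezy_udp@foo.ns-1.svc.cluster.local
//   fileName = "wheezy_udp@foo.ns-1.svc.cluster.local"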
func assertFilesExist(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) {
assertFilesContain(fileNames, fileDir, pod, client, false, "")
}
func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface, check bool, expected string) {
var failed []string
framework.ExpectNoError(wait.Poll(time.Second*10, time.Second*600, func() (bool, error) {
failed = []string{}
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
for _, fileName := range fileNames {
contents, err := client.CoreV1().RESTClient().Get().
Context(ctx).
Namespace(pod.Namespace).
Resource("pods").
SubResource("proxy").
Name(pod.Name).
Suffix(fileDir, fileName).
Do().Raw()
if err != nil {
if ctx.Err() != nil {
framework.Failf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
} else {
framework.Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
}
failed = append(failed, fileName)
} else if check && strings.TrimSpace(string(contents)) != expected {
framework.Logf("File %s from pod %s contains '%s' instead of '%s'", fileName, pod.Name, string(contents), expected)
failed = append(failed, fileName)
}
}
if len(failed) == 0 {
return true, nil
}
framework.Logf("Lookups using %s failed for: %v\n", pod.Name, failed)
return false, nil
}))
Expect(len(failed)).To(Equal(0))
}
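// For reference: the proxied GET in assertFilesContain corresponds to the
// apiserver path /api/v1/namespaces/<ns>/pods/<pod>/proxy/<fileDir>/<fileName>,
// i.e. the OK-files written by the probe containers are read back through the
// pod proxy of the webserver container.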
func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) {
By("submitting the pod to kubernetes")
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
defer func() {
By("deleting the pod")
defer GinkgoRecover()
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
}()
if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create %s pod: %v", pod.Name, err)
}
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("retrieving the pod")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get pod %s: %v", pod.Name, err)
}
// Try to find results for each expected name.
By("looking for the results for each expected name from probers")
assertFilesExist(fileNames, "results", pod, f.ClientSet)
// TODO: probe from the host, too.
framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}
func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {
By("submitting the pod to kubernetes")
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
defer func() {
By("deleting the pod")
defer GinkgoRecover()
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
}()
if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create %s pod: %v", pod.Name, err)
}
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("retrieving the pod")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get pod %s: %v", pod.Name, err)
}
// Try to find the expected value for each expected name.
By("looking for the results for each expected name from probers")
assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value)
framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}
func reverseArray(arr []string) []string {
for i := 0; i < len(arr)/2; i++ {
j := len(arr) - i - 1
arr[i], arr[j] = arr[j], arr[i]
}
return arr
}
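// Illustrative example: reverseArray(strings.Split("10.0.0.1", ".")) yields
// []string{"1", "0", "0", "10"}, so the PTR name built in createProbeCommand
// becomes "1.0.0.10.in-addr.arpa.". Note that reverseArray reverses its
// argument in place and returns the same slice.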
var _ = SIGDescribe("DNS", func() {
f := framework.NewDefaultFramework("dns")
/*
Testname: dns-for-clusters
Description: Make sure that DNS can resolve the names of clusters.
*/
framework.ConformanceIt("should provide DNS for the cluster ", func() {
// All the names we need to be able to resolve.
// TODO: Spin up a separate test service and test that dns works for that service.
namesToResolve := []string{
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster.local",
}
// Added due to #8512. This is critical for GCE and GKE deployments.
if framework.ProviderIs("gce", "gke") {
namesToResolve = append(namesToResolve, "google.com")
namesToResolve = append(namesToResolve, "metadata")
}
hostFQDN := fmt.Sprintf("%s.%s.%s.svc.cluster.local", dnsTestPodHostName, dnsTestServiceName, f.Namespace.Name)
hostEntries := []string{hostFQDN, dnsTestPodHostName}
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostEntries, "", "wheezy", f.Namespace.Name)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostEntries, "", "jessie", f.Namespace.Name)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS")
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
})
/*
Testname: dns-for-services
Description: Make sure that DNS can resolve the names of services.
*/
framework.ConformanceIt("should provide DNS for services ", func() {
// Create a test headless service.
By("Creating a test headless service")
testServiceSelector := map[string]string{
"dns-test": "true",
}
headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
Expect(err).NotTo(HaveOccurred())
defer func() {
By("deleting the test headless service")
defer GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
}()
regularService := framework.CreateServiceSpec("test-service-2", "", false, testServiceSelector)
regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService)
Expect(err).NotTo(HaveOccurred())
defer func() {
By("deleting the test service")
defer GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(regularService.Name, nil)
}()
// All the names we need to be able to resolve.
// TODO: Create more endpoints and ensure that multiple A records are returned
// for headless service.
namesToResolve := []string{
fmt.Sprintf("%s", headlessService.Name),
fmt.Sprintf("%s.%s", headlessService.Name, f.Namespace.Name),
fmt.Sprintf("%s.%s.svc", headlessService.Name, f.Namespace.Name),
fmt.Sprintf("_http._tcp.%s.%s.svc", headlessService.Name, f.Namespace.Name),
fmt.Sprintf("_http._tcp.%s.%s.svc", regularService.Name, f.Namespace.Name),
}
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "wheezy", f.Namespace.Name)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "jessie", f.Namespace.Name)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS")
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
pod.ObjectMeta.Labels = testServiceSelector
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
})
It("should provide DNS for pods for Hostname and Subdomain", func() {
// Create a test headless service.
By("Creating a test headless service")
testServiceSelector := map[string]string{
"dns-test-hostname-attribute": "true",
}
serviceName := "dns-test-service-2"
podHostname := "dns-querier-2"
headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
Expect(err).NotTo(HaveOccurred())
defer func() {
By("deleting the test headless service")
defer GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
}()
hostFQDN := fmt.Sprintf("%s.%s.%s.svc.cluster.local", podHostname, serviceName, f.Namespace.Name)
hostNames := []string{hostFQDN, podHostname}
namesToResolve := []string{hostFQDN}
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostNames, "", "wheezy", f.Namespace.Name)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostNames, "", "jessie", f.Namespace.Name)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS")
pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
pod1.ObjectMeta.Labels = testServiceSelector
pod1.Spec.Hostname = podHostname
pod1.Spec.Subdomain = serviceName
validateDNSResults(f, pod1, append(wheezyFileNames, jessieFileNames...))
})
It("should provide DNS for ExternalName services", func() {
// Create a test ExternalName service.
By("Creating a test externalName service")
serviceName := "dns-test-service-3"
externalNameService := framework.CreateServiceSpec(serviceName, "foo.example.com", false, nil)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService)
Expect(err).NotTo(HaveOccurred())
defer func() {
By("deleting the test externalName service")
defer GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameService.Name, nil)
}()
hostFQDN := fmt.Sprintf("%s.%s.svc.cluster.local", serviceName, f.Namespace.Name)
wheezyProbeCmd, wheezyFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy")
jessieProbeCmd, jessieFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "jessie")
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS")
pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
validateTargetedProbeOutput(f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.")
// Test changing the externalName field
By("changing the externalName to bar.example.com")
_, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
s.Spec.ExternalName = "bar.example.com"
})
Expect(err).NotTo(HaveOccurred())
wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy")
jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "jessie")
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a second pod to probe DNS")
pod2 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
validateTargetedProbeOutput(f, pod2, []string{wheezyFileName, jessieFileName}, "bar.example.com.")
// Test changing type from ExternalName to ClusterIP
By("changing the service to type=ClusterIP")
_, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeClusterIP
s.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: "TCP"},
}
})
Expect(err).NotTo(HaveOccurred())
wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "A", "wheezy")
jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "A", "jessie")
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a third pod to probe DNS")
pod3 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
svc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Get(externalNameService.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
validateTargetedProbeOutput(f, pod3, []string{wheezyFileName, jessieFileName}, svc.Spec.ClusterIP)
})
})

282
vendor/k8s.io/kubernetes/test/e2e/network/dns_common.go generated vendored Normal file

@@ -0,0 +1,282 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"fmt"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
type dnsTestCommon struct {
f *framework.Framework
c clientset.Interface
ns string
name string
labels []string
dnsPod *v1.Pod
utilPod *v1.Pod
utilService *v1.Service
dnsServerPod *v1.Pod
cm *v1.ConfigMap
}
func newDnsTestCommon() dnsTestCommon {
return dnsTestCommon{
f: framework.NewDefaultFramework("dns-config-map"),
ns: "kube-system",
name: "kube-dns",
}
}
func (t *dnsTestCommon) init() {
By("Finding a DNS pod")
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"}))
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := t.f.ClientSet.CoreV1().Pods("kube-system").List(options)
Expect(err).NotTo(HaveOccurred())
Expect(len(pods.Items)).Should(BeNumerically(">=", 1))
t.dnsPod = &pods.Items[0]
framework.Logf("Using DNS pod: %v", t.dnsPod.Name)
}
func (t *dnsTestCommon) checkDNSRecord(name string, predicate func([]string) bool, timeout time.Duration) {
t.checkDNSRecordFrom(name, predicate, "kube-dns", timeout)
}
func (t *dnsTestCommon) checkDNSRecordFrom(name string, predicate func([]string) bool, target string, timeout time.Duration) {
var actual []string
err := wait.PollImmediate(
time.Duration(1)*time.Second,
timeout,
func() (bool, error) {
actual = t.runDig(name, target)
if predicate(actual) {
return true, nil
}
return false, nil
})
if err != nil {
framework.Failf("dig result did not match: %#v after %v",
actual, timeout)
}
}
// runDig queries for `dnsName`. Returns a list of responses.
func (t *dnsTestCommon) runDig(dnsName, target string) []string {
cmd := []string{"/usr/bin/dig", "+short"}
switch target {
case "kube-dns":
cmd = append(cmd, "@"+t.dnsPod.Status.PodIP, "-p", "10053")
case "dnsmasq":
break
default:
panic(fmt.Errorf("invalid target: %v", target))
}
cmd = append(cmd, dnsName)
stdout, stderr, err := t.f.ExecWithOptions(framework.ExecOptions{
Command: cmd,
Namespace: t.f.Namespace.Name,
PodName: t.utilPod.Name,
ContainerName: "util",
CaptureStdout: true,
CaptureStderr: true,
})
framework.Logf("Running dig: %v, stdout: %q, stderr: %q, err: %v",
cmd, stdout, stderr, err)
if stdout == "" {
return []string{}
} else {
return strings.Split(stdout, "\n")
}
}
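// Illustrative example with hypothetical values: for target "kube-dns" and a
// DNS pod IP of 10.0.0.5, the command becomes
//   /usr/bin/dig +short @10.0.0.5 -p 10053 abc.acme.local
// and a successful lookup returns e.g. []string{"1.1.1.1"}, while empty
// stdout yields an empty slice.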
func (t *dnsTestCommon) setConfigMap(cm *v1.ConfigMap) {
if t.cm != nil {
t.cm = cm
}
cm.ObjectMeta.Namespace = t.ns
cm.ObjectMeta.Name = t.name
options := metav1.ListOptions{
FieldSelector: fields.Set{
"metadata.namespace": t.ns,
"metadata.name": t.name,
}.AsSelector().String(),
}
cmList, err := t.c.CoreV1().ConfigMaps(t.ns).List(options)
Expect(err).NotTo(HaveOccurred())
if len(cmList.Items) == 0 {
By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm))
_, err := t.c.CoreV1().ConfigMaps(t.ns).Create(cm)
Expect(err).NotTo(HaveOccurred())
} else {
By(fmt.Sprintf("Updating the ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm))
_, err := t.c.CoreV1().ConfigMaps(t.ns).Update(cm)
Expect(err).NotTo(HaveOccurred())
}
}
func (t *dnsTestCommon) deleteConfigMap() {
By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name))
t.cm = nil
err := t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil)
Expect(err).NotTo(HaveOccurred())
}
func (t *dnsTestCommon) createUtilPod() {
// Actual port # doesn't matter, just needs to exist.
const servicePort = 10101
t.utilPod = &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: t.f.Namespace.Name,
Labels: map[string]string{"app": "e2e-dns-configmap"},
GenerateName: "e2e-dns-configmap-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "util",
Image: imageutils.GetE2EImage(imageutils.Dnsutils),
Command: []string{"sleep", "10000"},
Ports: []v1.ContainerPort{
{ContainerPort: servicePort, Protocol: "TCP"},
},
},
},
},
}
var err error
t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.utilPod)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Created pod %v", t.utilPod)
Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(HaveOccurred())
t.utilService = &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: t.f.Namespace.Name,
Name: "e2e-dns-configmap",
},
Spec: v1.ServiceSpec{
Selector: map[string]string{"app": "e2e-dns-configmap"},
Ports: []v1.ServicePort{
{
Protocol: "TCP",
Port: servicePort,
TargetPort: intstr.FromInt(servicePort),
},
},
},
}
t.utilService, err = t.c.CoreV1().Services(t.f.Namespace.Name).Create(t.utilService)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Created service %v", t.utilService)
}
func (t *dnsTestCommon) deleteUtilPod() {
podClient := t.c.CoreV1().Pods(t.f.Namespace.Name)
if err := podClient.Delete(t.utilPod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Logf("Delete of pod %v:%v failed: %v",
t.utilPod.Namespace, t.utilPod.Name, err)
}
}
func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) {
t.dnsServerPod = &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: t.f.Namespace.Name,
GenerateName: "e2e-dns-configmap-dns-server-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dns",
Image: "gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.14.5",
Command: []string{
"/usr/sbin/dnsmasq",
"-u", "root",
"-k",
"--log-facility", "-",
"-q",
},
},
},
DNSPolicy: "Default",
},
}
for name, ip := range aRecords {
t.dnsServerPod.Spec.Containers[0].Command = append(
t.dnsServerPod.Spec.Containers[0].Command,
fmt.Sprintf("-A/%v/%v", name, ip))
}
var err error
t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.dnsServerPod)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Created pod %v", t.dnsServerPod)
Expect(t.f.WaitForPodRunning(t.dnsServerPod.Name)).NotTo(HaveOccurred())
t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Get(
t.dnsServerPod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
}
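// For reference: each aRecords entry such as "abc.acme.local" -> "1.1.1.1"
// becomes a "-A/abc.acme.local/1.1.1.1" dnsmasq flag, i.e. a static A record
// served by this test DNS pod.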
func (t *dnsTestCommon) deleteDNSServerPod() {
podClient := t.c.CoreV1().Pods(t.f.Namespace.Name)
if err := podClient.Delete(t.dnsServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Logf("Delete of pod %v:%v failed: %v",
t.dnsServerPod.Namespace, t.dnsServerPod.Name, err)
}
}

195
vendor/k8s.io/kubernetes/test/e2e/network/dns_configmap.go generated vendored Normal file

@@ -0,0 +1,195 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"fmt"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
. "github.com/onsi/ginkgo"
)
type dnsFederationsConfigMapTest struct {
dnsTestCommon
fedMap map[string]string
isValid bool
}
var _ = SIGDescribe("DNS configMap federations", func() {
t := &dnsFederationsConfigMapTest{dnsTestCommon: newDnsTestCommon()}
BeforeEach(func() { t.c = t.f.ClientSet })
It("should be able to change federation configuration [Slow][Serial]", func() {
t.run()
})
})
func (t *dnsFederationsConfigMapTest) run() {
t.init()
defer t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil)
t.createUtilPod()
defer t.deleteUtilPod()
t.validate()
t.labels = []string{"abc", "ghi"}
valid1 := map[string]string{"federations": t.labels[0] + "=def"}
valid1m := map[string]string{t.labels[0]: "def"}
valid2 := map[string]string{"federations": t.labels[1] + "=xyz"}
valid2m := map[string]string{t.labels[1]: "xyz"}
invalid := map[string]string{"federations": "invalid.map=xyz"}
By("empty -> valid1")
t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
t.validate()
By("valid1 -> valid2")
t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true)
t.validate()
By("valid2 -> invalid")
t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false)
t.validate()
By("invalid -> valid1")
t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
t.validate()
By("valid1 -> deleted")
t.deleteConfigMap()
t.validate()
By("deleted -> invalid")
t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false)
t.validate()
}
func (t *dnsFederationsConfigMapTest) validate() {
federations := t.fedMap
if len(federations) == 0 {
By(fmt.Sprintf("Validating federation labels %v do not exist", t.labels))
for _, label := range t.labels {
var federationDNS = fmt.Sprintf("e2e-dns-configmap.%s.%s.svc.cluster.local.",
t.f.Namespace.Name, label)
predicate := func(actual []string) bool {
return len(actual) == 0
}
t.checkDNSRecord(federationDNS, predicate, wait.ForeverTestTimeout)
}
} else {
for label := range federations {
var federationDNS = fmt.Sprintf("%s.%s.%s.svc.cluster.local.",
t.utilService.ObjectMeta.Name, t.f.Namespace.Name, label)
var localDNS = fmt.Sprintf("%s.%s.svc.cluster.local.",
t.utilService.ObjectMeta.Name, t.f.Namespace.Name)
// Check local mapping. Checking a remote mapping requires
// creating an arbitrary DNS record which is not possible at the
// moment.
By(fmt.Sprintf("Validating federation record %v", label))
predicate := func(actual []string) bool {
for _, v := range actual {
if v == localDNS {
return true
}
}
return false
}
t.checkDNSRecord(federationDNS, predicate, wait.ForeverTestTimeout)
}
}
}
func (t *dnsFederationsConfigMapTest) setConfigMap(cm *v1.ConfigMap, fedMap map[string]string, isValid bool) {
if isValid {
t.fedMap = fedMap
}
t.isValid = isValid
t.dnsTestCommon.setConfigMap(cm)
}
func (t *dnsFederationsConfigMapTest) deleteConfigMap() {
t.isValid = false
t.dnsTestCommon.deleteConfigMap()
}
type dnsNameserverTest struct {
dnsTestCommon
}
func (t *dnsNameserverTest) run() {
t.init()
t.createUtilPod()
defer t.deleteUtilPod()
t.createDNSServer(map[string]string{
"abc.acme.local": "1.1.1.1",
"def.acme.local": "2.2.2.2",
"widget.local": "3.3.3.3",
})
defer t.deleteDNSServerPod()
t.setConfigMap(&v1.ConfigMap{Data: map[string]string{
"stubDomains": fmt.Sprintf(`{"acme.local":["%v"]}`, t.dnsServerPod.Status.PodIP),
"upstreamNameservers": fmt.Sprintf(`["%v"]`, t.dnsServerPod.Status.PodIP),
}})
// The ConfigMap update mechanism takes longer than the standard
// wait.ForeverTestTimeout.
moreForeverTestTimeout := 2 * 60 * time.Second
t.checkDNSRecordFrom(
"abc.acme.local",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "1.1.1.1" },
"dnsmasq",
moreForeverTestTimeout)
t.checkDNSRecordFrom(
"def.acme.local",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "2.2.2.2" },
"dnsmasq",
moreForeverTestTimeout)
t.checkDNSRecordFrom(
"widget.local",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "3.3.3.3" },
"dnsmasq",
moreForeverTestTimeout)
t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil)
// Wait for the deleted ConfigMap to take effect, otherwise the
// configuration can bleed into other tests.
t.checkDNSRecordFrom(
"abc.acme.local",
func(actual []string) bool { return len(actual) == 0 },
"dnsmasq",
moreForeverTestTimeout)
}
var _ = SIGDescribe("DNS configMap nameserver", func() {
t := &dnsNameserverTest{dnsTestCommon: newDnsTestCommon()}
BeforeEach(func() { t.c = t.f.ClientSet })
It("should be able to change stubDomain configuration [Slow][Serial]", func() {
t.run()
})
})

18
vendor/k8s.io/kubernetes/test/e2e/network/doc.go generated vendored Normal file

@@ -0,0 +1,18 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package network contains the end-to-end tests for Kubernetes networking.
package network

172
vendor/k8s.io/kubernetes/test/e2e/network/example_cluster_dns.go generated vendored Normal file

@@ -0,0 +1,172 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
dnsReadyTimeout = time.Minute
)
const queryDnsPythonTemplate string = `
import socket
try:
socket.gethostbyname('%s')
print 'ok'
except:
print 'err'`
var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
f := framework.NewDefaultFramework("cluster-dns")
var c clientset.Interface
BeforeEach(func() {
c = f.ClientSet
})
It("should create pod that uses dns", func() {
mkpath := func(file string) string {
return filepath.Join(framework.TestContext.RepoRoot, "examples/cluster-dns", file)
}
// Contrary to the example, this test does not use contexts; for
// simplicity, namespaces are passed directly.
// Also for simplicity we don't use yamls with namespaces, but
// create testing namespaces instead.
backendRcYaml := mkpath("dns-backend-rc.yaml")
backendRcName := "dns-backend"
backendSvcYaml := mkpath("dns-backend-service.yaml")
backendSvcName := "dns-backend"
backendPodName := "dns-backend"
frontendPodYaml := mkpath("dns-frontend-pod.yaml")
frontendPodName := "dns-frontend"
frontendPodContainerName := "dns-frontend"
podOutput := "Hello World!"
// we need two namespaces anyway, so let's forget about
// the one created in BeforeEach and create two new ones.
namespaces := []*v1.Namespace{nil, nil}
for i := range namespaces {
var err error
namespaces[i], err = f.CreateNamespace(fmt.Sprintf("dnsexample%d", i), nil)
Expect(err).NotTo(HaveOccurred())
}
for _, ns := range namespaces {
framework.RunKubectlOrDie("create", "-f", backendRcYaml, getNsCmdFlag(ns))
}
for _, ns := range namespaces {
framework.RunKubectlOrDie("create", "-f", backendSvcYaml, getNsCmdFlag(ns))
}
// wait for objects
for _, ns := range namespaces {
framework.WaitForControlledPodsRunning(c, ns.Name, backendRcName, api.Kind("ReplicationController"))
framework.WaitForService(c, ns.Name, backendSvcName, true, framework.Poll, framework.ServiceStartTimeout)
}
// It is not enough that the pods are marked running; the application
// itself may not have been initialized yet. Just query the application.
for _, ns := range namespaces {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName}))
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns.Name).List(options)
Expect(err).NotTo(HaveOccurred())
err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods)
Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond")
framework.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
err = framework.ServiceResponding(c, ns.Name, backendSvcName)
Expect(err).NotTo(HaveOccurred(), "waiting for the service to respond")
}
// Now another tricky part:
// It may happen that the service name is not yet in DNS.
// So if we start our pod, it will fail. We must make sure
// the name is already resolvable. So let's try to query DNS from
// the pod we have, until we find our service name.
// This complicated code may be removed if the pod itself retried after
// dns error or timeout.
// This code is probably unnecessary, but let's stay on the safe side.
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendPodName}))
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(namespaces[0].Name).List(options)
if err != nil || pods == nil || len(pods.Items) == 0 {
framework.Failf("no running pods found")
}
podName := pods.Items[0].Name
queryDns := fmt.Sprintf(queryDnsPythonTemplate, backendSvcName+"."+namespaces[0].Name)
_, err = framework.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDns}, "ok", dnsReadyTimeout)
Expect(err).NotTo(HaveOccurred(), "waiting for output from pod exec")
updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, "dns-backend.development.svc.cluster.local", fmt.Sprintf("dns-backend.%s.svc.cluster.local", namespaces[0].Name))
// create a pod in each namespace
for _, ns := range namespaces {
framework.NewKubectlCommand("create", "-f", "-", getNsCmdFlag(ns)).WithStdinData(updatedPodYaml).ExecOrDie()
}
// wait until the pods have been scheduled, i.e. are not Pending anymore. Remember
// that we cannot wait for the pods to be running because our pods terminate by themselves.
for _, ns := range namespaces {
err := framework.WaitForPodNotPending(c, ns.Name, frontendPodName)
framework.ExpectNoError(err)
}
// wait for pods to print their result
for _, ns := range namespaces {
_, err := framework.LookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, framework.PodStartTimeout)
Expect(err).NotTo(HaveOccurred())
}
})
})
func getNsCmdFlag(ns *v1.Namespace) string {
return fmt.Sprintf("--namespace=%v", ns.Name)
}
// Pass enough context with the 'old' parameter so that it replaces what you really intended.
func prepareResourceWithReplacedString(inputFile, old, new string) string {
f, err := os.Open(inputFile)
Expect(err).NotTo(HaveOccurred())
defer f.Close()
data, err := ioutil.ReadAll(f)
Expect(err).NotTo(HaveOccurred())
podYaml := strings.Replace(string(data), old, new, 1)
return podYaml
}
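// Illustrative example: called above with the frontend pod yaml, this
// replaces the first occurrence of the literal
// "dns-backend.development.svc.cluster.local" with the FQDN of the backend
// service in the first freshly created test namespace.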

184
vendor/k8s.io/kubernetes/test/e2e/network/firewall.go generated vendored Normal file

@@ -0,0 +1,184 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"fmt"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = SIGDescribe("Firewall rule", func() {
var firewallTestName = "firewall-test"
f := framework.NewDefaultFramework(firewallTestName)
var cs clientset.Interface
var cloudConfig framework.CloudConfig
var gceCloud *gcecloud.GCECloud
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce")
cs = f.ClientSet
cloudConfig = framework.TestContext.CloudConfig
gceCloud = cloudConfig.Provider.(*gcecloud.GCECloud)
})
// This test takes around 6 minutes to run
It("[Slow] [Serial] should create valid firewall rules for LoadBalancer type service", func() {
ns := f.Namespace.Name
// These source ranges are only used to verify that the LB firewall rules contain exactly the same entries
firewallTestSourceRanges := []string{"0.0.0.0/1", "128.0.0.0/1"}
serviceName := "firewall-test-loadbalancer"
By("Getting cluster ID")
clusterID, err := framework.GetClusterID(cs)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Got cluster ID: %v", clusterID)
jig := framework.NewServiceTestJig(cs, serviceName)
nodeList := jig.GetNodes(framework.MaxNodesForEndpointsTests)
Expect(nodeList).NotTo(BeNil())
nodesNames := jig.GetNodesNames(framework.MaxNodesForEndpointsTests)
if len(nodesNames) <= 0 {
framework.Failf("Expect at least 1 node, got: %v", nodesNames)
}
nodesSet := sets.NewString(nodesNames...)
By("Creating a LoadBalancer type service with ExternalTrafficPolicy=Global")
svc := jig.CreateLoadBalancerService(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) {
svc.Spec.Ports = []v1.ServicePort{{Protocol: "TCP", Port: framework.FirewallTestHttpPort}}
svc.Spec.LoadBalancerSourceRanges = firewallTestSourceRanges
})
defer func() {
jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeNodePort
svc.Spec.LoadBalancerSourceRanges = nil
})
Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
By("Waiting for the local traffic health check firewall rule to be deleted")
localHCFwName := framework.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.GetLoadBalancerName(svc), false)
_, err := framework.WaitForFirewallRule(gceCloud, localHCFwName, false, framework.LoadBalancerCleanupTimeout)
Expect(err).NotTo(HaveOccurred())
}()
svcExternalIP := svc.Status.LoadBalancer.Ingress[0].IP
By("Checking if service's firewall rule is correct")
lbFw := framework.ConstructFirewallForLBService(svc, cloudConfig.NodeTag)
fw, err := gceCloud.GetFirewall(lbFw.Name)
Expect(err).NotTo(HaveOccurred())
Expect(framework.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
By("Checking if service's nodes health check firewall rule is correct")
nodesHCFw := framework.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, true)
fw, err = gceCloud.GetFirewall(nodesHCFw.Name)
Expect(err).NotTo(HaveOccurred())
Expect(framework.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
// An OnlyLocal service is needed to check exactly which nodes the requests are forwarded to by the Load Balancer on GCE
By("Updating LoadBalancer service to ExternalTrafficPolicy=Local")
svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
})
By("Waiting for the nodes health check firewall rule to be deleted")
_, err = framework.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, framework.LoadBalancerCleanupTimeout)
Expect(err).NotTo(HaveOccurred())
By("Waiting for the correct local traffic health check firewall rule to be created")
localHCFw := framework.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false)
fw, err = framework.WaitForFirewallRule(gceCloud, localHCFw.Name, true, framework.LoadBalancerCreateTimeoutDefault)
Expect(err).NotTo(HaveOccurred())
Expect(framework.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
By(fmt.Sprintf("Creating netexec pods on at most %v nodes", framework.MaxNodesForEndpointsTests))
for i, nodeName := range nodesNames {
podName := fmt.Sprintf("netexec%v", i)
jig.LaunchNetexecPodOnNode(f, nodeName, podName, framework.FirewallTestHttpPort, framework.FirewallTestUdpPort, true)
defer func() {
framework.Logf("Cleaning up the netexec pod: %v", podName)
Expect(cs.CoreV1().Pods(ns).Delete(podName, nil)).NotTo(HaveOccurred())
}()
}
// Send requests from outside of the cluster because internal traffic is whitelisted
By("Accessing the external service ip from outside, all non-master nodes should be reached")
Expect(framework.TestHitNodesFromOutside(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred())
// Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster
// by removing the tag on one vm and making sure it doesn't get any traffic. This is an imperfect
// simulation; we really want to check that traffic doesn't reach a vm outside the GKE cluster, but
// that's much harder to do in the current e2e framework.
By(fmt.Sprintf("Removing tags from one of the nodes: %v", nodesNames[0]))
nodesSet.Delete(nodesNames[0])
// Instance could run in a different zone in multi-zone test. Figure out which zone
// it is in before proceeding.
zone := cloudConfig.Zone
if zoneInLabel, ok := nodeList.Items[0].Labels[kubeletapis.LabelZoneFailureDomain]; ok {
zone = zoneInLabel
}
removedTags := framework.SetInstanceTags(cloudConfig, nodesNames[0], zone, []string{})
defer func() {
By("Adding tags back to the node and wait till the traffic is recovered")
nodesSet.Insert(nodesNames[0])
framework.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags)
// Make sure traffic is recovered before exit
Expect(framework.TestHitNodesFromOutside(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred())
}()
By("Accessing serivce through the external ip and examine got no response from the node without tags")
Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet, 15)).NotTo(HaveOccurred())
})
It("should have correct firewall rules for e2e cluster", func() {
nodes := framework.GetReadySchedulableNodesOrDie(cs)
if len(nodes.Items) <= 0 {
framework.Failf("Expect at least 1 node, got: %v", len(nodes.Items))
}
By("Checking if e2e firewall rules are correct")
for _, expFw := range framework.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) {
fw, err := gceCloud.GetFirewall(expFw.Name)
Expect(err).NotTo(HaveOccurred())
Expect(framework.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
}
By("Checking well known ports on master and nodes are not exposed externally")
nodeAddrs := framework.NodeAddresses(nodes, v1.NodeExternalIP)
Expect(len(nodeAddrs)).NotTo(BeZero())
masterAddr := framework.GetMasterAddress(cs)
flag, _ := framework.TestNotReachableHTTPTimeout(masterAddr, ports.ControllerManagerPort, framework.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(masterAddr, ports.SchedulerPort, framework.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletPort, framework.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletReadOnlyPort, framework.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.ProxyStatusPort, framework.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
})
})

23
vendor/k8s.io/kubernetes/test/e2e/network/framework.go generated vendored Normal file

@@ -0,0 +1,23 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import "github.com/onsi/ginkgo"
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-network] "+text, body)
}
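// Illustrative example: SIGDescribe("DNS", func() { ... }) registers the
// enclosed specs under "[sig-network] DNS", so they can be selected with a
// Ginkgo focus/skip regexp matching "\[sig-network\]".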

409
vendor/k8s.io/kubernetes/test/e2e/network/ingress.go generated vendored Normal file

@@ -0,0 +1,409 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"fmt"
"path/filepath"
"time"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
NEGAnnotation = "alpha.cloud.google.com/load-balancer-neg"
NEGUpdateTimeout = 2 * time.Minute
instanceGroupAnnotation = "ingress.gcp.kubernetes.io/instance-groups"
)
var _ = SIGDescribe("Loadbalancing: L7", func() {
defer GinkgoRecover()
var (
ns string
jig *framework.IngressTestJig
conformanceTests []framework.IngressConformanceTests
cloudConfig framework.CloudConfig
)
f := framework.NewDefaultFramework("ingress")
BeforeEach(func() {
jig = framework.NewIngressTestJig(f.ClientSet)
ns = f.Namespace.Name
cloudConfig = framework.TestContext.CloudConfig
// this test wants powerful permissions. Since the namespace names are unique, we can leave this
// lying around so we don't have to race any caches
framework.BindClusterRole(jig.Client.RbacV1beta1(), "cluster-admin", f.Namespace.Name,
rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
err := framework.WaitForAuthorizationUpdate(jig.Client.AuthorizationV1beta1(),
serviceaccount.MakeUsername(f.Namespace.Name, "default"),
"", "create", schema.GroupResource{Resource: "pods"}, true)
framework.ExpectNoError(err)
})
// Before enabling this loadbalancer test in any other test list you must
// make sure the associated project has enough quota. At the time of this
// writing a GCE project is allowed 3 backend services by default. This
// test requires at least 5.
//
// Slow by design ~10m for each "It" block dominated by loadbalancer setup time
// TODO: write similar tests for nginx, haproxy and AWS Ingress.
Describe("GCE [Slow] [Feature:Ingress]", func() {
var gceController *framework.GCEIngressController
// Platform specific setup
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
By("Initializing gce controller")
gceController = &framework.GCEIngressController{
Ns: ns,
Client: jig.Client,
Cloud: framework.TestContext.CloudConfig,
}
gceController.Init()
})
// Platform specific cleanup
AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
framework.DescribeIng(ns)
}
if jig.Ingress == nil {
By("No ingress created, no cleanup necessary")
return
}
By("Deleting ingress")
jig.TryDeleteIngress()
By("Cleaning up cloud resources")
framework.CleanupGCEIngressController(gceController)
})
It("should conform to Ingress spec", func() {
conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{})
for _, t := range conformanceTests {
By(t.EntryLog)
t.Execute()
By(t.ExitLog)
jig.WaitForIngress(true)
}
})
It("should create ingress with given static-ip", func() {
// ip released when the rest of lb resources are deleted in CleanupGCEIngressController
ip := gceController.CreateStaticIP(ns)
By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", ns, ip))
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "static-ip"), ns, map[string]string{
"kubernetes.io/ingress.global-static-ip-name": ns,
"kubernetes.io/ingress.allow-http": "false",
}, map[string]string{})
By("waiting for Ingress to come up with ip: " + ip)
httpClient := framework.BuildInsecureClient(framework.IngressReqTimeout)
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false))
By("should reject HTTP traffic")
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%v/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true))
By("should have correct firewall rule for ingress")
fw := gceController.GetFirewallRule()
nodeTags := []string{cloudConfig.NodeTag}
if framework.TestContext.Provider != "gce" {
// nodeTags would be different in GKE.
nodeTags = framework.GetNodeTags(jig.Client, cloudConfig)
}
expFw := jig.ConstructFirewallForIngress(gceController, nodeTags)
// Passed the last argument as `true` to verify the backend ports is a subset
// of the allowed ports in firewall rule, given there may be other existing
// ingress resources and backends we are not aware of.
Expect(framework.VerifyFirewallRule(fw, expFw, gceController.Cloud.Network, true)).NotTo(HaveOccurred())
// TODO: uncomment the restart test once we have a way to synchronize
// and know that the controller has resumed watching. If we delete
// the ingress before the controller is ready we will leak.
// By("restaring glbc")
// restarter := NewRestartConfig(
// framework.GetMasterHost(), "glbc", glbcHealthzPort, restartPollInterval, restartTimeout)
// restarter.restart()
// By("should continue serving on provided static-ip for 30 seconds")
// framework.ExpectNoError(jig.verifyURL(fmt.Sprintf("https://%v/", ip), "", 30, 1*time.Second, httpClient))
})
It("multicluster ingress should get instance group annotation", func() {
name := "echomap"
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http"), ns, map[string]string{
framework.IngressClass: framework.MulticlusterIngressClassValue,
}, map[string]string{})
By(fmt.Sprintf("waiting for Ingress %s to come up", name))
pollErr := wait.Poll(2*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
ing, err := f.ClientSet.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
framework.ExpectNoError(err)
annotations := ing.Annotations
if annotations == nil || annotations[instanceGroupAnnotation] == "" {
framework.Logf("Waiting for ingress to get %s annotation. Found annotations: %v", instanceGroupAnnotation, annotations)
return false, nil
}
return true, nil
})
if pollErr != nil {
framework.ExpectNoError(fmt.Errorf("Timed out waiting for ingress %s to get %s annotation", name, instanceGroupAnnotation))
}
// TODO(nikhiljindal): Check the instance group annotation value and verify with a multizone cluster.
})
// TODO: Implement a multizone e2e that verifies traffic reaches each
// zone based on pod labels.
})
Describe("GCE [Slow] [Feature:NEG]", func() {
var gceController *framework.GCEIngressController
// Platform specific setup
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
By("Initializing gce controller")
gceController = &framework.GCEIngressController{
Ns: ns,
Client: jig.Client,
Cloud: framework.TestContext.CloudConfig,
}
gceController.Init()
})
// Platform specific cleanup
AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
framework.DescribeIng(ns)
}
if jig.Ingress == nil {
By("No ingress created, no cleanup necessary")
return
}
By("Deleting ingress")
jig.TryDeleteIngress()
By("Cleaning up cloud resources")
framework.CleanupGCEIngressController(gceController)
})
It("should conform to Ingress spec", func() {
jig.PollInterval = 5 * time.Second
conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{
NEGAnnotation: "true",
})
for _, t := range conformanceTests {
By(t.EntryLog)
t.Execute()
By(t.ExitLog)
jig.WaitForIngress(true)
usingNeg, err := gceController.BackendServiceUsingNEG(jig.GetIngressNodePorts(false))
Expect(err).NotTo(HaveOccurred())
Expect(usingNeg).To(BeTrue())
}
})
It("should be able to switch between IG and NEG modes", func() {
var err error
By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetIngressNodePorts(false))
Expect(err).NotTo(HaveOccurred())
Expect(usingNEG).To(BeTrue())
By("Switch backend service to use IG")
svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, svc := range svcList.Items {
svc.Annotations[NEGAnnotation] = "false"
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred())
}
wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
return gceController.BackendServiceUsingIG(jig.GetIngressNodePorts(true))
})
jig.WaitForIngress(true)
By("Switch backend service to use NEG")
svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, svc := range svcList.Items {
svc.Annotations[NEGAnnotation] = "true"
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred())
}
wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
return gceController.BackendServiceUsingNEG(jig.GetIngressNodePorts(false))
})
jig.WaitForIngress(true)
})
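// Note that the two wait.Poll calls above discard the returned error, so a
// timeout there is silent until WaitForIngress fails. A stricter variant
// would look roughly like this (a sketch, not used by these tests):
//
//	err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
//		return gceController.BackendServiceUsingNEG(jig.GetIngressNodePorts(false))
//	})
//	framework.ExpectNoError(err, "timed out waiting for the backend services to switch modes")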
It("should sync endpoints to NEG", func() {
name := "hostname"
scaleAndValidateNEG := func(num int) {
scale, err := f.ClientSet.ExtensionsV1beta1().Deployments(ns).GetScale(name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
if scale.Spec.Replicas != int32(num) {
scale.Spec.Replicas = int32(num)
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(ns).UpdateScale(name, scale)
Expect(err).NotTo(HaveOccurred())
}
wait.Poll(5*time.Second, NEGUpdateTimeout, func() (bool, error) {
res, err := jig.GetDistinctResponseFromIngress()
if err != nil {
return false, err
}
return res.Len() == num, err
})
}
By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetIngressNodePorts(false))
Expect(err).NotTo(HaveOccurred())
Expect(usingNEG).To(BeTrue())
// the initial replica count is 1
scaleAndValidateNEG(1)
By("Scale up number of backends to 5")
scaleAndValidateNEG(5)
By("Scale down number of backends to 3")
scaleAndValidateNEG(3)
By("Scale up number of backends to 6")
scaleAndValidateNEG(6)
By("Scale down number of backends to 2")
scaleAndValidateNEG(3)
})
It("rolling update backend pods should not cause service disruption", func() {
name := "hostname"
replicas := 8
By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetIngressNodePorts(false))
Expect(err).NotTo(HaveOccurred())
Expect(usingNEG).To(BeTrue())
By(fmt.Sprintf("Scale backend replicas to %d", replicas))
scale, err := f.ClientSet.ExtensionsV1beta1().Deployments(ns).GetScale(name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
scale.Spec.Replicas = int32(replicas)
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(ns).UpdateScale(name, scale)
Expect(err).NotTo(HaveOccurred())
wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
res, err := jig.GetDistinctResponseFromIngress()
if err != nil {
return false, err
}
return res.Len() == replicas, err
})
By("Trigger rolling update and observe service disruption")
deploy, err := f.ClientSet.ExtensionsV1beta1().Deployments(ns).Get(name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// trigger by changing graceful termination period to 60 seconds
gracePeriod := int64(60)
deploy.Spec.Template.Spec.TerminationGracePeriodSeconds = &gracePeriod
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(ns).Update(deploy)
Expect(err).NotTo(HaveOccurred())
wait.Poll(30*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
res, err := jig.GetDistinctResponseFromIngress()
Expect(err).NotTo(HaveOccurred())
deploy, err := f.ClientSet.ExtensionsV1beta1().Deployments(ns).Get(name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
if int(deploy.Status.UpdatedReplicas) == replicas {
if res.Len() == replicas {
return true, nil
} else {
framework.Logf("Expecting %d different responses, but got %d.", replicas, res.Len())
return false, nil
}
} else {
framework.Logf("Waiting for rolling update to finished. Keep sending traffic.")
return false, nil
}
})
})
})
// Time: borderline 5m, slow by design
Describe("[Slow] Nginx", func() {
var nginxController *framework.NginxIngressController
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
By("Initializing nginx controller")
jig.Class = "nginx"
nginxController = &framework.NginxIngressController{Ns: ns, Client: jig.Client}
// TODO: This test may fail on other platforms. We can simply skip it
// but we want to allow easy testing where a user might've hand
// configured firewalls.
if framework.ProviderIs("gce", "gke") {
framework.ExpectNoError(framework.GcloudComputeResourceCreate("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID, "--allow", "tcp:80,tcp:443", "--network", framework.TestContext.CloudConfig.Network))
} else {
framework.Logf("WARNING: Not running on GCE/GKE, cannot create firewall rules for :80, :443. Assuming traffic can reach the external ips of all nodes in cluster on those ports.")
}
nginxController.Init()
})
AfterEach(func() {
if framework.ProviderIs("gce", "gke") {
framework.ExpectNoError(framework.GcloudComputeResourceDelete("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID))
}
if CurrentGinkgoTestDescription().Failed {
framework.DescribeIng(ns)
}
if jig.Ingress == nil {
By("No ingress created, no cleanup necessary")
return
}
By("Deleting ingress")
jig.TryDeleteIngress()
})
It("should conform to Ingress spec", func() {
// Poll more frequently to reduce e2e completion time.
// This test runs in presubmit.
jig.PollInterval = 5 * time.Second
conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{})
for _, t := range conformanceTests {
By(t.EntryLog)
t.Execute()
By(t.ExitLog)
jig.WaitForIngress(false)
}
})
})
})

208
vendor/k8s.io/kubernetes/test/e2e/network/kube_proxy.go generated vendored Normal file

@ -0,0 +1,208 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"encoding/json"
"fmt"
"math"
"strconv"
"strings"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/images/net/nat"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var kubeProxyE2eImage = imageutils.GetE2EImage(imageutils.Net)
var _ = SIGDescribe("Network", func() {
const (
testDaemonHttpPort = 11301
testDaemonTcpPort = 11302
timeoutSeconds = 10
postFinTimeoutSeconds = 5
)
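// The test below opens a TCP connection from a client pod to a server pod
// and deliberately leaks it half-closed: the server sends FIN, while the
// client (LeakConnection: true) never closes its end, so the conntrack
// entry on the client node sits in CLOSE_WAIT. Reading that entry's
// remaining timeout verifies the nf_conntrack CLOSE_WAIT value that
// kube-proxy is expected to have configured.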
fr := framework.NewDefaultFramework("network")
It("should set TCP CLOSE_WAIT timeout", func() {
nodes := framework.GetReadySchedulableNodesOrDie(fr.ClientSet)
ips := framework.CollectAddresses(nodes, v1.NodeInternalIP)
if len(nodes.Items) < 2 {
framework.Skipf(
"Test requires >= 2 Ready nodes, but there are only %v nodes",
len(nodes.Items))
}
type NodeInfo struct {
node *v1.Node
name string
nodeIp string
}
clientNodeInfo := NodeInfo{
node: &nodes.Items[0],
name: nodes.Items[0].Name,
nodeIp: ips[0],
}
serverNodeInfo := NodeInfo{
node: &nodes.Items[1],
name: nodes.Items[1].Name,
nodeIp: ips[1],
}
zero := int64(0)
clientPodSpec := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "e2e-net-client",
Namespace: fr.Namespace.Name,
Labels: map[string]string{"app": "e2e-net-client"},
},
Spec: v1.PodSpec{
NodeName: clientNodeInfo.name,
Containers: []v1.Container{
{
Name: "e2e-net-client",
Image: kubeProxyE2eImage,
ImagePullPolicy: "Always",
Command: []string{
"/net", "-serve", fmt.Sprintf("0.0.0.0:%d", testDaemonHttpPort),
},
},
},
TerminationGracePeriodSeconds: &zero,
},
}
serverPodSpec := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "e2e-net-server",
Namespace: fr.Namespace.Name,
Labels: map[string]string{"app": "e2e-net-server"},
},
Spec: v1.PodSpec{
NodeName: serverNodeInfo.name,
Containers: []v1.Container{
{
Name: "e2e-net-server",
Image: kubeProxyE2eImage,
ImagePullPolicy: "Always",
Command: []string{
"/net",
"-runner", "nat-closewait-server",
"-options",
fmt.Sprintf(`{"LocalAddr":"0.0.0.0:%v", "PostFinTimeoutSeconds":%v}`,
testDaemonTcpPort,
postFinTimeoutSeconds),
},
Ports: []v1.ContainerPort{
{
Name: "tcp",
ContainerPort: testDaemonTcpPort,
HostPort: testDaemonTcpPort,
},
},
},
},
TerminationGracePeriodSeconds: &zero,
},
}
By(fmt.Sprintf(
"Launching a server daemon on node %v (node ip: %v, image: %v)",
serverNodeInfo.name,
serverNodeInfo.nodeIp,
kubeProxyE2eImage))
fr.PodClient().CreateSync(serverPodSpec)
By(fmt.Sprintf(
"Launching a client daemon on node %v (node ip: %v, image: %v)",
clientNodeInfo.name,
clientNodeInfo.nodeIp,
kubeProxyE2eImage))
fr.PodClient().CreateSync(clientPodSpec)
By("Make client connect")
options := nat.CloseWaitClientOptions{
RemoteAddr: fmt.Sprintf("%v:%v",
serverNodeInfo.nodeIp, testDaemonTcpPort),
TimeoutSeconds: timeoutSeconds,
PostFinTimeoutSeconds: 0,
LeakConnection: true,
}
jsonBytes, err := json.Marshal(options)
framework.ExpectNoError(err)
cmd := fmt.Sprintf(
`curl -X POST http://localhost:%v/run/nat-closewait-client -d `+
`'%v' 2>/dev/null`,
testDaemonHttpPort,
string(jsonBytes))
framework.RunHostCmdOrDie(fr.Namespace.Name, "e2e-net-client", cmd)
time.Sleep(1 * time.Second)
By("Checking /proc/net/nf_conntrack for the timeout")
// If test flakes occur here, then this check should be performed
// in a loop as there may be a race with the client connecting.
framework.IssueSSHCommandWithResult(
fmt.Sprintf("sudo cat /proc/net/ip_conntrack | grep 'dport=%v'",
testDaemonTcpPort),
framework.TestContext.Provider,
clientNodeInfo.node)
// Timeout in seconds is available as the third column from
// /proc/net/ip_conntrack.
result, err := framework.IssueSSHCommandWithResult(
fmt.Sprintf(
"sudo cat /proc/net/ip_conntrack "+
"| grep 'CLOSE_WAIT.*dst=%v.*dport=%v' "+
"| tail -n 1"+
"| awk '{print $3}' ",
serverNodeInfo.nodeIp,
testDaemonTcpPort),
framework.TestContext.Provider,
clientNodeInfo.node)
framework.ExpectNoError(err)
conntrackTimeoutSeconds, err := strconv.Atoi(strings.TrimSpace(result.Stdout))
framework.ExpectNoError(err)
// These must be synchronized from the default values set in
// pkg/apis/../defaults.go ConntrackTCPCloseWaitTimeout. The
// current defaults are hidden in the initialization code.
const epsilonSeconds = 60
const expectedTimeoutSeconds = 60 * 60
framework.Logf("conntrack entry timeout was: %v, expected: %v",
timeoutSeconds, expectedTimeoutSeconds)
Expect(math.Abs(float64(timeoutSeconds - expectedTimeoutSeconds))).Should(
BeNumerically("<", (epsilonSeconds)))
})
})
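// The awk pipeline above prints the third whitespace-separated field of an
// ip_conntrack entry, which is the remaining timeout in seconds. An
// equivalent parse in Go, shown as a sketch in case this ever moves out of
// the shell command (unused by the test above):
func parseConntrackTimeout(line string) (int, error) {
// Example entry: "tcp 6 3600 CLOSE_WAIT src=... dst=... sport=... dport=..."
fields := strings.Fields(line)
if len(fields) < 3 {
return 0, fmt.Errorf("unexpected conntrack line: %q", line)
}
return strconv.Atoi(fields[2])
}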

581
vendor/k8s.io/kubernetes/test/e2e/network/network_policy.go generated vendored Normal file

@ -0,0 +1,581 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
/*
The following Network Policy tests verify that policy object definitions
are correctly enforced by a networking plugin. Each test launches a simple
HTTP server pod and two client pods with differing attributes, creates a
network policy that should only allow connections from one of the clients,
and then asserts that the clients failed or succeeded to connect as expected.
*/
var _ = SIGDescribe("NetworkPolicy", func() {
var service *v1.Service
var podServer *v1.Pod
f := framework.NewDefaultFramework("network-policy")
Context("NetworkPolicy between server and client", func() {
BeforeEach(func() {
By("Creating a simple server that serves on port 80 and 81.")
podServer, service = createServerPodAndService(f, f.Namespace, "server", []int{80, 81})
By("Waiting for pod ready", func() {
err := f.WaitForPodReady(podServer.Name)
Expect(err).NotTo(HaveOccurred())
})
// Create pods, which should be able to communicate with the server on port 80 and 81.
By("Testing pods can connect to both ports when no policy is present.")
testCanConnect(f, f.Namespace, "client-can-connect-80", service, 80)
testCanConnect(f, f.Namespace, "client-can-connect-81", service, 81)
})
AfterEach(func() {
cleanupServerPodAndService(f, podServer, service)
})
It("should support a 'default-deny' policy [Feature:NetworkPolicy]", func() {
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "deny-all",
},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
Ingress: []networkingv1.NetworkPolicyIngressRule{},
},
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
Expect(err).NotTo(HaveOccurred())
defer cleanupNetworkPolicy(f, policy)
// Create a pod with name 'client-cannot-connect', which will attempt to communicate with the server,
// but should not be able to now that isolation is on.
testCannotConnect(f, f.Namespace, "client-cannot-connect", service, 80)
})
It("should enforce policy based on PodSelector [Feature:NetworkPolicy]", func() {
By("Creating a network policy for the server which allows traffic from the pod 'client-a'.")
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-client-a-via-pod-selector",
},
Spec: networkingv1.NetworkPolicySpec{
// Apply this policy to the Server
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": podServer.Name,
},
},
// Allow traffic only from client-a
Ingress: []networkingv1.NetworkPolicyIngressRule{{
From: []networkingv1.NetworkPolicyPeer{{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": "client-a",
},
},
}},
}},
},
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
Expect(err).NotTo(HaveOccurred())
defer cleanupNetworkPolicy(f, policy)
By("Creating client-a which should be able to contact the server.", func() {
testCanConnect(f, f.Namespace, "client-a", service, 80)
})
By("Creating client-b which should not be able to contact the server.", func() {
testCannotConnect(f, f.Namespace, "client-b", service, 80)
})
})
It("should enforce policy based on NamespaceSelector [Feature:NetworkPolicy]", func() {
nsA := f.Namespace
nsBName := f.BaseName + "-b"
// The CreateNamespace helper uses the input name as a Name Generator, so the namespace itself
// will have a different name than what we are setting as the value of ns-name.
// This is fine as long as we don't try to match the label as nsB.Name in our policy.
nsB, err := f.CreateNamespace(nsBName, map[string]string{
"ns-name": nsBName,
})
Expect(err).NotTo(HaveOccurred())
// Wait for the server pod, created in BeforeEach in nsA, to come up.
framework.Logf("Waiting for server to come up.")
err = framework.WaitForPodRunningInNamespace(f.ClientSet, podServer)
Expect(err).NotTo(HaveOccurred())
// Create Policy for that service that allows traffic only via namespace B
By("Creating a network policy for the server which allows traffic from namespace-b.")
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-ns-b-via-namespace-selector",
},
Spec: networkingv1.NetworkPolicySpec{
// Apply to server
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": podServer.Name,
},
},
// Allow traffic only from NS-B
Ingress: []networkingv1.NetworkPolicyIngressRule{{
From: []networkingv1.NetworkPolicyPeer{{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"ns-name": nsBName,
},
},
}},
}},
},
}
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(policy)
Expect(err).NotTo(HaveOccurred())
defer cleanupNetworkPolicy(f, policy)
testCannotConnect(f, nsA, "client-a", service, 80)
testCanConnect(f, nsB, "client-b", service, 80)
})
It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() {
By("Creating a network policy for the Service which allows traffic only to one port.")
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-ingress-on-port-81",
},
Spec: networkingv1.NetworkPolicySpec{
// Apply to server
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": podServer.Name,
},
},
// Allow traffic only to one port.
Ingress: []networkingv1.NetworkPolicyIngressRule{{
Ports: []networkingv1.NetworkPolicyPort{{
Port: &intstr.IntOrString{IntVal: 81},
}},
}},
},
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
Expect(err).NotTo(HaveOccurred())
defer cleanupNetworkPolicy(f, policy)
By("Testing pods can connect only to the port allowed by the policy.")
testCannotConnect(f, f.Namespace, "client-a", service, 80)
testCanConnect(f, f.Namespace, "client-b", service, 81)
})
It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func() {
By("Creating a network policy for the Service which allows traffic only to one port.")
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-ingress-on-port-80",
},
Spec: networkingv1.NetworkPolicySpec{
// Apply to server
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": podServer.Name,
},
},
// Allow traffic only to one port.
Ingress: []networkingv1.NetworkPolicyIngressRule{{
Ports: []networkingv1.NetworkPolicyPort{{
Port: &intstr.IntOrString{IntVal: 80},
}},
}},
},
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
Expect(err).NotTo(HaveOccurred())
defer cleanupNetworkPolicy(f, policy)
By("Creating a network policy for the Service which allows traffic only to another port.")
policy2 := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-ingress-on-port-81",
},
Spec: networkingv1.NetworkPolicySpec{
// Apply to server
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": podServer.Name,
},
},
// Allow traffic only to one port.
Ingress: []networkingv1.NetworkPolicyIngressRule{{
Ports: []networkingv1.NetworkPolicyPort{{
Port: &intstr.IntOrString{IntVal: 81},
}},
}},
},
}
policy2, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy2)
Expect(err).NotTo(HaveOccurred())
defer cleanupNetworkPolicy(f, policy2)
By("Testing pods can connect to both ports when both policies are present.")
testCanConnect(f, f.Namespace, "client-a", service, 80)
testCanConnect(f, f.Namespace, "client-b", service, 81)
})
It("should support allow-all policy [Feature:NetworkPolicy]", func() {
By("Creating a network policy which allows all traffic.")
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-all",
},
Spec: networkingv1.NetworkPolicySpec{
// Allow all traffic
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{},
},
Ingress: []networkingv1.NetworkPolicyIngressRule{{}},
},
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
Expect(err).NotTo(HaveOccurred())
defer cleanupNetworkPolicy(f, policy)
By("Testing pods can connect to both ports when an 'allow-all' policy is present.")
testCanConnect(f, f.Namespace, "client-a", service, 80)
testCanConnect(f, f.Namespace, "client-b", service, 81)
})
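// Note the asymmetry with the 'default-deny' policy above: an empty
// Ingress slice ([]networkingv1.NetworkPolicyIngressRule{}) admits no
// traffic to the selected pods, while a slice holding a single empty rule
// ({{}}) admits all traffic.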
It("should allow ingress access on one named port [Feature:NetworkPolicy]", func() {
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-client-a-via-named-port-ingress-rule",
},
Spec: networkingv1.NetworkPolicySpec{
// Apply this policy to the Server
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": podServer.Name,
},
},
// Allow traffic to only one named port: "serve-80".
Ingress: []networkingv1.NetworkPolicyIngressRule{{
Ports: []networkingv1.NetworkPolicyPort{{
Port: &intstr.IntOrString{Type: intstr.String, StrVal: "serve-80"},
}},
}},
},
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
Expect(err).NotTo(HaveOccurred())
defer cleanupNetworkPolicy(f, policy)
By("Creating client-a which should be able to contact the server.", func() {
testCanConnect(f, f.Namespace, "client-a", service, 80)
})
By("Creating client-b which should not be able to contact the server on port 81.", func() {
testCannotConnect(f, f.Namespace, "client-b", service, 81)
})
})
It("should allow egress access on one named port [Feature:NetworkPolicy]", func() {
clientPodName := "client-a"
protocolUDP := v1.ProtocolUDP
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-client-a-via-named-port-egress-rule",
},
Spec: networkingv1.NetworkPolicySpec{
// Apply this policy to client-a
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"pod-name": clientPodName,
},
},
// Allow traffic to only one named port: "serve-80".
Egress: []networkingv1.NetworkPolicyEgressRule{{
Ports: []networkingv1.NetworkPolicyPort{
{
Port: &intstr.IntOrString{Type: intstr.String, StrVal: "serve-80"},
},
// Allow DNS look-ups
{
Protocol: &protocolUDP,
Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53},
},
},
}},
},
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
Expect(err).NotTo(HaveOccurred())
defer cleanupNetworkPolicy(f, policy)
By("Creating client-a which should be able to contact the server.", func() {
testCanConnect(f, f.Namespace, clientPodName, service, 80)
})
By("Creating client-a which should not be able to contact the server on port 81.", func() {
testCannotConnect(f, f.Namespace, clientPodName, service, 81)
})
})
})
})
func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int) {
By(fmt.Sprintf("Creating client pod %s that should successfully connect to %s.", podName, service.Name))
podClient := createNetworkClientPod(f, ns, podName, service, targetPort)
defer func() {
By(fmt.Sprintf("Cleaning up the pod %s", podName))
if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil {
framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err)
}
}()
framework.Logf("Waiting for %s to complete.", podClient.Name)
err := framework.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podClient.Name, ns.Name)
Expect(err).NotTo(HaveOccurred(), "Pod did not finish as expected.")
framework.Logf("Waiting for %s to complete.", podClient.Name)
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name)
if err != nil {
// Collect pod logs when we see a failure.
logs, logErr := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName))
if logErr != nil {
framework.Failf("Error getting container logs: %s", logErr)
}
// Collect current NetworkPolicies applied in the test namespace.
policies, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
framework.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err)
}
// Collect the list of pods running in the test namespace.
podsInNS, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{})
if err != nil {
framework.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
}
pods := []string{}
for _, p := range podsInNS {
pods = append(pods, fmt.Sprintf("Pod: %s, Status: %s\n", p.Name, p.Status.String()))
}
framework.Failf("Pod %s should be able to connect to service %s, but was not able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t%v\n\n", podName, service.Name, logs, policies.Items, pods)
// Dump debug information for the test namespace.
framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
}
}
func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int) {
By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", podName, service.Name))
podClient := createNetworkClientPod(f, ns, podName, service, targetPort)
defer func() {
By(fmt.Sprintf("Cleaning up the pod %s", podName))
if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil {
framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err)
}
}()
framework.Logf("Waiting for %s to complete.", podClient.Name)
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name)
// We expect an error here since it's a cannot connect test.
// Dump debug information if the error was nil.
if err == nil {
// Collect pod logs when we see a failure.
logs, logErr := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName))
if logErr != nil {
framework.Failf("Error getting container logs: %s", logErr)
}
// Collect current NetworkPolicies applied in the test namespace.
policies, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
framework.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err)
}
// Collect the list of pods running in the test namespace.
podsInNS, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{})
if err != nil {
framework.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
}
pods := []string{}
for _, p := range podsInNS {
pods = append(pods, fmt.Sprintf("Pod: %s, Status: %s\n", p.Name, p.Status.String()))
}
framework.Failf("Pod %s should not be able to connect to service %s, but was able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t %v\n\n", podName, service.Name, logs, policies.Items, pods)
// Dump debug information for the test namespace.
framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
}
}
// Create a server pod with a listening container for each port in ports[].
// Will also assign a pod label with key "pod-name" and value set to the given
// podName, for later use by the network policy.
func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace, podName string, ports []int) (*v1.Pod, *v1.Service) {
// Because we have a variable number of ports, we'll first loop through and
// generate our Containers for our pod, and ServicePorts for our Service.
containers := []v1.Container{}
servicePorts := []v1.ServicePort{}
for _, port := range ports {
// Build the containers for the server pod.
containers = append(containers, v1.Container{
Name: fmt.Sprintf("%s-container-%d", podName, port),
Image: imageutils.GetE2EImage(imageutils.Porter),
Env: []v1.EnvVar{
{
Name: fmt.Sprintf("SERVE_PORT_%d", port),
Value: "foo",
},
},
Ports: []v1.ContainerPort{
{
ContainerPort: int32(port),
Name: fmt.Sprintf("serve-%d", port),
},
},
ReadinessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/",
Port: intstr.IntOrString{
IntVal: int32(port),
},
Scheme: v1.URISchemeHTTP,
},
},
},
})
// Build the Service Ports for the service.
servicePorts = append(servicePorts, v1.ServicePort{
Name: fmt.Sprintf("%s-%d", podName, port),
Port: int32(port),
TargetPort: intstr.FromInt(port),
})
}
By(fmt.Sprintf("Creating a server pod %s in namespace %s", podName, namespace.Name))
pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{
"pod-name": podName,
},
},
Spec: v1.PodSpec{
Containers: containers,
RestartPolicy: v1.RestartPolicyNever,
},
})
Expect(err).NotTo(HaveOccurred())
framework.Logf("Created pod %v", pod.ObjectMeta.Name)
svcName := fmt.Sprintf("svc-%s", podName)
By(fmt.Sprintf("Creating a service %s for pod %s in namespace %s", svcName, podName, namespace.Name))
svc, err := f.ClientSet.CoreV1().Services(namespace.Name).Create(&v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: svcName,
},
Spec: v1.ServiceSpec{
Ports: servicePorts,
Selector: map[string]string{
"pod-name": podName,
},
},
})
Expect(err).NotTo(HaveOccurred())
framework.Logf("Created service %s", svc.Name)
return pod, svc
}
func cleanupServerPodAndService(f *framework.Framework, pod *v1.Pod, service *v1.Service) {
By("Cleaning up the server.")
if err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
framework.Failf("unable to cleanup pod %v: %v", pod.Name, err)
}
By("Cleaning up the server's service.")
if err := f.ClientSet.CoreV1().Services(service.Namespace).Delete(service.Name, nil); err != nil {
framework.Failf("unable to cleanup svc %v: %v", service.Name, err)
}
}
// Create a client pod which will attempt a netcat to the provided service, on the specified port.
// This client will attempt a one-shot connection, then die, without restarting the pod.
// Test can then be asserted based on whether the pod quit with an error or not.
func createNetworkClientPod(f *framework.Framework, namespace *v1.Namespace, podName string, targetService *v1.Service, targetPort int) *v1.Pod {
pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{
"pod-name": podName,
},
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Name: fmt.Sprintf("%s-container", podName),
Image: "busybox",
Args: []string{
"/bin/sh",
"-c",
fmt.Sprintf("for i in $(seq 1 5); do wget -T 8 %s.%s:%d -O - && exit 0 || sleep 1; done; exit 1",
targetService.Name, targetService.Namespace, targetPort),
},
},
},
},
})
Expect(err).NotTo(HaveOccurred())
return pod
}
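// The shell loop above exits 0 on the first successful wget and 1 after five
// failed attempts (on the order of 45 seconds worst case, given the 8-second
// timeout and 1-second sleeps), so the pod phase Succeeded/Failed maps
// directly onto the can/cannot-connect assertions made by the helpers above.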
func cleanupNetworkPolicy(f *framework.Framework, policy *networkingv1.NetworkPolicy) {
By("Cleaning up the policy.")
if err := f.ClientSet.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(policy.Name, nil); err != nil {
framework.Failf("unable to cleanup policy %v: %v", policy.Name, err)
}
}

241
vendor/k8s.io/kubernetes/test/e2e/network/network_tiers.go generated vendored Normal file

@ -0,0 +1,241 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"fmt"
"net/http"
"time"
computealpha "google.golang.org/api/compute/v0.alpha"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/cloudprovider"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
f := framework.NewDefaultFramework("services")
var cs clientset.Interface
var internalClientset internalclientset.Interface
serviceLBNames := []string{}
BeforeEach(func() {
// This test suite requires the GCE environment.
framework.SkipUnlessProviderIs("gce")
cs = f.ClientSet
internalClientset = f.InternalClientset
})
AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
framework.DescribeSvc(f.Namespace.Name)
}
for _, lb := range serviceLBNames {
framework.Logf("cleaning gce resource for %s", lb)
framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Zone)
}
//reset serviceLBNames
serviceLBNames = []string{}
})
It("should be able to create and tear down a standard-tier load balancer [Slow]", func() {
lagTimeout := framework.LoadBalancerLagTimeoutDefault
createTimeout := framework.GetServiceLoadBalancerCreationTimeout(cs)
svcName := "net-tiers-svc"
ns := f.Namespace.Name
jig := framework.NewServiceTestJig(cs, svcName)
By("creating a pod to be part of the service " + svcName)
jig.RunOrFail(ns, nil)
// Test 1: create a standard tiered LB for the Service.
By("creating a Service of type LoadBalancer using the standard network tier")
svc := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer
setNetworkTier(svc, gcecloud.NetworkTierAnnotationStandard)
})
// Verify that service has been updated properly.
svcTier, err := gcecloud.GetServiceNetworkTier(svc)
Expect(err).NotTo(HaveOccurred())
Expect(svcTier).To(Equal(gcecloud.NetworkTierStandard))
// Record the LB name for test cleanup.
serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc))
// Wait and verify the LB.
ingressIP := waitAndVerifyLBWithTier(jig, ns, svcName, "", createTimeout, lagTimeout)
// Test 2: re-create a LB of a different tier for the updated Service.
By("updating the Service to use the premium (default) tier")
svc = jig.UpdateServiceOrFail(ns, svcName, func(svc *v1.Service) {
clearNetworkTier(svc)
})
// Verify that service has been updated properly.
svcTier, err = gcecloud.GetServiceNetworkTier(svc)
Expect(err).NotTo(HaveOccurred())
Expect(svcTier).To(Equal(gcecloud.NetworkTierDefault))
// Wait until the ingress IP changes. Each tier has its own pool of
// IPs, so changing tiers implies changing IPs.
ingressIP = waitAndVerifyLBWithTier(jig, ns, svcName, ingressIP, createTimeout, lagTimeout)
// Test 3: create a standard-tier LB with a user-requested IP.
By("reserving a static IP for the load balancer")
requestedAddrName := fmt.Sprintf("e2e-ext-lb-net-tier-%s", framework.RunId)
requestedIP, err := reserveAlphaRegionalAddress(requestedAddrName, gcecloud.NetworkTierStandard)
Expect(err).NotTo(HaveOccurred(), "failed to reserve a STANDARD tiered address")
defer func() {
if requestedAddrName != "" {
// Release GCE static address - this is not kube-managed and will not be automatically released.
if err := framework.DeleteGCEStaticIP(requestedAddrName); err != nil {
framework.Logf("failed to release static IP address %q: %v", requestedAddrName, err)
}
}
}()
framework.Logf("Allocated static IP to be used by the load balancer: %q", requestedIP)
By("updating the Service to use the standard tier with a requested IP")
svc = jig.UpdateServiceOrFail(ns, svc.Name, func(svc *v1.Service) {
svc.Spec.LoadBalancerIP = requestedIP
setNetworkTier(svc, gcecloud.NetworkTierAnnotationStandard)
})
// Verify that service has been updated properly.
Expect(svc.Spec.LoadBalancerIP).To(Equal(requestedIP))
svcTier, err = gcecloud.GetServiceNetworkTier(svc)
Expect(err).NotTo(HaveOccurred())
Expect(svcTier).To(Equal(gcecloud.NetworkTierStandard))
// Wait until the ingress IP changes and verifies the LB.
ingressIP = waitAndVerifyLBWithTier(jig, ns, svcName, ingressIP, createTimeout, lagTimeout)
})
})
func waitAndVerifyLBWithTier(jig *framework.ServiceTestJig, ns, svcName, existingIP string, waitTimeout, checkTimeout time.Duration) string {
// When existingIP is empty this waits for any ingress IP to show up
// (first creation of the LB); otherwise it waits for the ingress IP to
// change away from existingIP (re-creation of the LB).
svc := jig.WaitForNewIngressIPOrFail(ns, svcName, existingIP, waitTimeout)
svcPort := int(svc.Spec.Ports[0].Port)
lbIngress := &svc.Status.LoadBalancer.Ingress[0]
ingressIP := framework.GetIngressPoint(lbIngress)
By("running sanity and reachability checks")
if svc.Spec.LoadBalancerIP != "" {
// Verify that the new ingress IP is the requested IP if it's set.
Expect(ingressIP).To(Equal(svc.Spec.LoadBalancerIP))
}
jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
// If the IP has been used by a previous test, sometimes we get lingering
// 404 errors even after the LB is long gone. Tolerate and retry until
// the new LB is fully established, since this feature is still Alpha in GCP.
jig.TestReachableHTTPWithRetriableErrorCodes(ingressIP, svcPort, []int{http.StatusNotFound}, checkTimeout)
// Verify the network tier matches the desired.
svcNetTier, err := gcecloud.GetServiceNetworkTier(svc)
Expect(err).NotTo(HaveOccurred())
netTier, err := getLBNetworkTierByIP(ingressIP)
Expect(err).NotTo(HaveOccurred(), "failed to get the network tier of the load balancer")
Expect(netTier).To(Equal(svcNetTier))
return ingressIP
}
func getLBNetworkTierByIP(ip string) (gcecloud.NetworkTier, error) {
var rule *computealpha.ForwardingRule
// Retry a few times to tolerate flakes.
err := wait.PollImmediate(5*time.Second, 15*time.Second, func() (bool, error) {
obj, err := getGCEForwardingRuleByIP(ip)
if err != nil {
return false, err
}
rule = obj
return true, nil
})
if err != nil {
return "", err
}
return gcecloud.NetworkTierGCEValueToType(rule.NetworkTier), nil
}
func getGCEForwardingRuleByIP(ip string) (*computealpha.ForwardingRule, error) {
cloud, err := framework.GetGCECloud()
if err != nil {
return nil, err
}
ruleList, err := cloud.ListAlphaRegionForwardingRules(cloud.Region())
if err != nil {
return nil, err
}
for _, rule := range ruleList.Items {
if rule.IPAddress == ip {
return rule, nil
}
}
return nil, fmt.Errorf("forwarding rule with ip %q not found", ip)
}
func setNetworkTier(svc *v1.Service, tier string) {
key := gcecloud.NetworkTierAnnotationKey
if svc.ObjectMeta.Annotations == nil {
svc.ObjectMeta.Annotations = map[string]string{}
}
svc.ObjectMeta.Annotations[key] = tier
}
func clearNetworkTier(svc *v1.Service) {
key := gcecloud.NetworkTierAnnotationKey
if svc.ObjectMeta.Annotations == nil {
return
}
delete(svc.ObjectMeta.Annotations, key)
}
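// For example, setNetworkTier(svc, gcecloud.NetworkTierAnnotationStandard)
// pins the Service to the Standard tier via the GCE network-tier annotation
// (the exact key and value literals are owned by the gce cloud provider
// package), while clearNetworkTier reverts it to the Premium default.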
// TODO: add retries if this turns out to be flaky.
// TODO(#51665): remove this helper function once Network Tiers becomes beta.
func reserveAlphaRegionalAddress(name string, netTier gcecloud.NetworkTier) (string, error) {
cloud, err := framework.GetGCECloud()
if err != nil {
return "", err
}
alphaAddr := &computealpha.Address{
Name: name,
NetworkTier: netTier.ToGCEValue(),
}
if err := cloud.ReserveAlphaRegionAddress(alphaAddr, cloud.Region()); err != nil {
return "", err
}
addr, err := cloud.GetRegionAddress(name, cloud.Region())
if err != nil {
return "", err
}
return addr.Address, nil
}

237
vendor/k8s.io/kubernetes/test/e2e/network/networking.go generated vendored Normal file

@ -0,0 +1,237 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"fmt"
"net/http"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
var _ = SIGDescribe("Networking", func() {
var svcname = "nettest"
f := framework.NewDefaultFramework(svcname)
BeforeEach(func() {
// Assert basic external connectivity.
// Since this is not really a test of kubernetes in any way, we
// leave it as a pre-test assertion, rather than a Ginkgo test.
By("Executing a successful http request from the external internet")
resp, err := http.Get("http://google.com")
if err != nil {
framework.Failf("Unable to connect/talk to the internet: %v", err)
}
if resp.StatusCode != http.StatusOK {
framework.Failf("Unexpected error code, expected 200, got, %v (%v)", resp.StatusCode, resp)
}
})
It("should provide Internet connection for containers [Feature:Networking-IPv4]", func() {
By("Running container which tries to ping 8.8.8.8")
framework.ExpectNoError(
framework.CheckConnectivityToHost(f, "", "ping-test", "8.8.8.8", framework.IPv4PingCommand, 30))
})
It("should provide Internet connection for containers [Feature:Networking-IPv6][Experimental]", func() {
By("Running container which tries to ping google.com")
framework.ExpectNoError(
framework.CheckConnectivityToHost(f, "", "ping-test", "google.com", framework.IPv6PingCommand, 30))
})
// First test because it has no dependencies on variables created later on.
It("should provide unchanging, static URL paths for kubernetes api services", func() {
tests := []struct {
path string
}{
{path: "/healthz"},
{path: "/api"},
{path: "/apis"},
{path: "/metrics"},
{path: "/swaggerapi"},
{path: "/version"},
// TODO: test proxy links here
}
if !framework.ProviderIs("gke", "skeleton") {
tests = append(tests, struct{ path string }{path: "/logs"})
}
for _, test := range tests {
By(fmt.Sprintf("testing: %s", test.path))
data, err := f.ClientSet.CoreV1().RESTClient().Get().
AbsPath(test.path).
DoRaw()
if err != nil {
framework.Failf("Failed: %v\nBody: %s", err, string(data))
}
}
})
It("should check kube-proxy urls", func() {
// TODO: this is overkill; we just need the host-networking pod
// to hit kube-proxy URLs.
config := framework.NewNetworkingTestConfig(f)
By("checking kube-proxy URLs")
config.GetSelfURL(ports.ProxyHealthzPort, "/healthz", "200 OK")
// Verify /healthz returns the proper content.
config.GetSelfURL(ports.ProxyHealthzPort, "/healthz", "lastUpdated")
// Verify /proxyMode returns http status code 200.
config.GetSelfURLStatusCode(ports.ProxyStatusPort, "/proxyMode", "200")
})
// TODO: Remove [Slow] when this has had enough bake time to prove presubmit worthiness.
Describe("Granular Checks: Services [Slow]", func() {
It("should function for pod-Service: http", func() {
config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHttpPort))
config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHttpPort, config.MaxTries, 0, config.EndpointHostnames())
By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeHttpPort))
config.DialFromTestContainer("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, 0, config.EndpointHostnames())
})
It("should function for pod-Service: udp", func() {
config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUdpPort))
config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUdpPort, config.MaxTries, 0, config.EndpointHostnames())
By(fmt.Sprintf("dialing(udp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeUdpPort))
config.DialFromTestContainer("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, 0, config.EndpointHostnames())
})
It("should function for node-Service: http", func() {
config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterHttpPort))
config.DialFromNode("http", config.ClusterIP, framework.ClusterHttpPort, config.MaxTries, 0, config.EndpointHostnames())
By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHttpPort))
config.DialFromNode("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, 0, config.EndpointHostnames())
})
It("should function for node-Service: udp", func() {
config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterUdpPort))
config.DialFromNode("udp", config.ClusterIP, framework.ClusterUdpPort, config.MaxTries, 0, config.EndpointHostnames())
By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUdpPort))
config.DialFromNode("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, 0, config.EndpointHostnames())
})
It("should function for endpoint-Service: http", func() {
config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterHttpPort))
config.DialFromEndpointContainer("http", config.ClusterIP, framework.ClusterHttpPort, config.MaxTries, 0, config.EndpointHostnames())
By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeHttpPort))
config.DialFromEndpointContainer("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, 0, config.EndpointHostnames())
})
It("should function for endpoint-Service: udp", func() {
config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterUdpPort))
config.DialFromEndpointContainer("udp", config.ClusterIP, framework.ClusterUdpPort, config.MaxTries, 0, config.EndpointHostnames())
By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeUdpPort))
config.DialFromEndpointContainer("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, 0, config.EndpointHostnames())
})
It("should update endpoints: http", func() {
config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHttpPort))
config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHttpPort, config.MaxTries, 0, config.EndpointHostnames())
config.DeleteNetProxyPod()
By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHttpPort))
config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHttpPort, config.MaxTries, config.MaxTries, config.EndpointHostnames())
})
It("should update endpoints: udp", func() {
config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUdpPort))
config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUdpPort, config.MaxTries, 0, config.EndpointHostnames())
config.DeleteNetProxyPod()
By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUdpPort))
config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUdpPort, config.MaxTries, config.MaxTries, config.EndpointHostnames())
})
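// In the two update tests above, the dial after DeleteNetProxyPod passes
// config.MaxTries as the minimum-tries argument as well, meaning every
// attempt must succeed: once the endpoint is removed, no request may land
// on it.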
// Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling.
It("should update nodePort: http [Slow]", func() {
config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHttpPort))
config.DialFromNode("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, 0, config.EndpointHostnames())
config.DeleteNodePortService()
By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHttpPort))
config.DialFromNode("http", config.NodeIP, config.NodeHttpPort, config.MaxTries, config.MaxTries, sets.NewString())
})
// Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling.
It("should update nodePort: udp [Slow]", func() {
config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUdpPort))
config.DialFromNode("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, 0, config.EndpointHostnames())
config.DeleteNodePortService()
By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUdpPort))
config.DialFromNode("udp", config.NodeIP, config.NodeUdpPort, config.MaxTries, config.MaxTries, sets.NewString())
})
It("should function for client IP based session affinity: http", func() {
config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(http) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, framework.ClusterHttpPort))
// Check that exactly one endpoint is returned.
eps, err := config.GetEndpointsFromTestContainer("http", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterHttpPort, framework.SessionAffinityChecks)
if err != nil {
framework.Failf("Failed to get endpoints from test container, error: %v", err)
}
if len(eps) == 0 {
framework.Failf("Unexpectedly got no endpoints back")
}
if len(eps) > 1 {
framework.Failf("Unexpected endpoints returned: %v; expected exactly 1", eps)
}
})
It("should function for client IP based session affinity: udp", func() {
config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(udp) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, framework.ClusterUdpPort))
// Check that exactly one endpoint is returned.
eps, err := config.GetEndpointsFromTestContainer("udp", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterUdpPort, framework.SessionAffinityChecks)
if err != nil {
framework.Failf("Failed to get endpoints from test container, error: %v", err)
}
if len(eps) == 0 {
framework.Failf("Unexpectedly got no endpoints back")
}
if len(eps) > 1 {
framework.Failf("Unexpected endpoints returned: %v; expected exactly 1", eps)
}
})
})
})

172
vendor/k8s.io/kubernetes/test/e2e/network/networking_perf.go generated vendored Normal file

@ -0,0 +1,172 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
// Tests network performance using iperf or other containers.
import (
"fmt"
"math"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
// empirically derived as a baseline for expectations from running this test using kube-up.sh.
gceBandwidthBitsEstimate = int64(30000000000)
// on 4 node clusters, we found this test passes very quickly, generally in less than 100 seconds.
smallClusterTimeout = 200 * time.Second
)
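// With the 30 Gbit/s estimate above, a 4-node cluster therefore expects a
// per-service share of 30e9 / 4 = 7.5e9 bits/s (see expectedBandwidth below).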
// networkingIPerfTest runs iperf on a container in either IPv4 or IPv6 mode.
func networkingIPerfTest(isIPv6 bool) {
f := framework.NewDefaultFramework("network-perf")
// A few simple bandwidth tests which are capped by nodes.
// TODO replace the 1 with the scale option implementation
// TODO: Make this a function parameter, once we distribute iperf endpoints, possibly via session affinity.
numClient := 1
numServer := 1
maxBandwidthBits := gceBandwidthBitsEstimate
familyStr := ""
if isIPv6 {
familyStr = "-V "
}
It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() {
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
totalPods := len(nodes.Items)
// for a single service, we expect the available bandwidth to be divided across all pods. Very crude estimate.
expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods))
Expect(totalPods).NotTo(Equal(0))
appName := "iperf-e2e"
err, _ := f.CreateServiceForSimpleAppWithPods(
8001,
8002,
appName,
func(n v1.Node) v1.PodSpec {
return v1.PodSpec{
Containers: []v1.Container{{
Name: "iperf-server",
Image: imageutils.GetE2EImage(imageutils.Iperf),
Args: []string{
"/bin/sh",
"-c",
"/usr/local/bin/iperf " + familyStr + "-s -p 8001 ",
},
Ports: []v1.ContainerPort{{ContainerPort: 8001}},
}},
NodeName: n.Name,
RestartPolicy: v1.RestartPolicyOnFailure,
}
},
// this will be used to generate the service name which all iperf clients point at.
numServer, // Generally should be 1 server, unless we do affinity or use a version of iperf that supports LB
true, // Make sure we wait, otherwise all the clients will die and need to restart.
)
if err != nil {
framework.Failf("Fatal error waiting for iperf server endpoint : %v", err)
}
iperfClientPodLabels := f.CreatePodsPerNodeForSimpleApp(
"iperf-e2e-cli",
func(n v1.Node) v1.PodSpec {
return v1.PodSpec{
Containers: []v1.Container{
{
Name: "iperf-client",
Image: imageutils.GetE2EImage(imageutils.Iperf),
Args: []string{
"/bin/sh",
"-c",
"/usr/local/bin/iperf " + familyStr + "-c service-for-" + appName + " -p 8002 --reportstyle C && sleep 5",
},
},
},
RestartPolicy: v1.RestartPolicyOnFailure, // let them successfully die.
}
},
numClient,
)
framework.Logf("Reading all perf results to stdout.")
framework.Logf("date,cli,cliPort,server,serverPort,id,interval,transferBits,bandwidthBits")
// Calculate expected number of clients based on total nodes.
expectedCli := func() int {
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
return int(math.Min(float64(len(nodes.Items)), float64(numClient)))
}()
// Extra 1/10 second per client.
iperfTimeout := smallClusterTimeout + (time.Duration(expectedCli/10) * time.Second)
iperfResults := &IPerfResults{}
iperfClusterVerification := f.NewClusterVerification(
f.Namespace,
framework.PodStateVerification{
Selectors: iperfClientPodLabels,
ValidPhases: []v1.PodPhase{v1.PodSucceeded},
},
)
pods, err2 := iperfClusterVerification.WaitFor(expectedCli, iperfTimeout)
if err2 != nil {
framework.Failf("Error in wait...")
} else if len(pods) < expectedCli {
framework.Failf("IPerf restuls : Only got %v out of %v, after waiting %v", len(pods), expectedCli, iperfTimeout)
} else {
// For each pod, build up a collection of IPerfRecords.
iperfClusterVerification.ForEach(
func(p v1.Pod) {
resultS, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "iperf-client", "0-", 1*time.Second)
if err == nil {
framework.Logf(resultS)
iperfResults.Add(NewIPerf(resultS))
} else {
framework.Failf("Unexpected error, %v when running forEach on the pods.", err)
}
})
}
fmt.Println("[begin] Node,Bandwith CSV")
fmt.Println(iperfResults.ToTSV())
fmt.Println("[end] Node,Bandwith CSV")
for ipClient, bandwidth := range iperfResults.BandwidthMap {
framework.Logf("%v had bandwidth %v. Ratio to expected (%v) was %f", ipClient, bandwidth, expectedBandwidth, float64(bandwidth)/float64(expectedBandwidth))
}
})
}
// Declared as Flaky since it has not been proven to run in parallel on small nodes or slow networks in CI.
// TODO jayunit100 : Retag this test according to semantics from #22401
var _ = SIGDescribe("Networking IPerf IPv4 [Experimental] [Feature:Networking-IPv4] [Slow] [Feature:Networking-Performance]", func() {
networkingIPerfTest(false)
})
// Declared as Flaky since it has not been proven to run in parallel on small nodes or slow networks in CI.
// TODO jayunit100 : Retag this test according to semantics from #22401
var _ = SIGDescribe("Networking IPerf IPv6 [Experimental] [Feature:Networking-IPv6] [Slow] [Feature:Networking-Performance]", func() {
networkingIPerfTest(true)
})

260
vendor/k8s.io/kubernetes/test/e2e/network/no_snat.go generated vendored Normal file
View File

@ -0,0 +1,260 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
// . "github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
testPodPort = 8080
testProxyPort = 31235 // Firewall rule allows external traffic on ports 30000-32767. I just picked a random one.
)
var testPodImage = imageutils.GetE2EImage(imageutils.NoSnatTest)
var testProxyImage = imageutils.GetE2EImage(imageutils.NoSnatTestProxy)
var (
testPod = v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "no-snat-test",
Labels: map[string]string{
"no-snat-test": "",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "no-snat-test",
Image: testPodImage,
Args: []string{"--port", strconv.Itoa(testPodPort)},
Env: []v1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "status.podIP"}},
},
},
},
},
},
}
testProxyPod = v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "no-snat-test-proxy",
},
Spec: v1.PodSpec{
HostNetwork: true,
Containers: []v1.Container{
{
Name: "no-snat-test-proxy",
Image: testProxyImage,
Args: []string{"--port", strconv.Itoa(testProxyPort)},
Ports: []v1.ContainerPort{
{
ContainerPort: testProxyPort,
HostPort: testProxyPort,
},
},
},
},
},
}
)
// newTestPod produces a pod spec that passes nip to the pod as the NODE_IP env var (POD_IP is injected via the downward API).
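// Note that pod := testPod makes a shallow copy: the copy shares its
// Containers backing array with the package-level testPod, so the Env append
// below is also visible through testPod. Successive calls therefore carry the
// NODE_IP entries appended by earlier calls as well; in practice duplicate env
// names resolve to the last entry, so each pod still sees its own node's IP.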
func newTestPod(nodename string, nip string) *v1.Pod {
pod := testPod
nodeIP := v1.EnvVar{
Name:  "NODE_IP",
Value: nip,
}
pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, nodeIP)
pod.Spec.NodeName = nodename
return &pod
}
func newTestProxyPod(nodename string) *v1.Pod {
pod := testProxyPod
pod.Spec.NodeName = nodename
return &pod
}
func getIP(iptype v1.NodeAddressType, node *v1.Node) (string, error) {
for _, addr := range node.Status.Addresses {
if addr.Type == iptype {
return addr.Address, nil
}
}
return "", fmt.Errorf("did not find %s on Node", iptype)
}
func getSchedulable(nodes []v1.Node) (*v1.Node, error) {
for _, node := range nodes {
if !node.Spec.Unschedulable {
return &node, nil
}
}
return nil, fmt.Errorf("all Nodes were unschedulable")
}
func checknosnatURL(proxy, pip string, ips []string) string {
return fmt.Sprintf("http://%s/checknosnat?target=%s&ips=%s", proxy, pip, strings.Join(ips, ","))
}
// This test verifies that a Pod on each node in a cluster can talk to Pods on every other node without SNAT.
// We use the [Feature:NoSNAT] tag so that most jobs will skip this test by default.
var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() {
f := framework.NewDefaultFramework("no-snat-test")
It("Should be able to send traffic between Pods without SNAT", func() {
cs := f.ClientSet
pc := cs.CoreV1().Pods(f.Namespace.Name)
nc := cs.CoreV1().Nodes()
By("creating a test pod on each Node")
nodes, err := nc.List(metav1.ListOptions{})
framework.ExpectNoError(err)
if len(nodes.Items) == 0 {
framework.ExpectNoError(fmt.Errorf("no Nodes in the cluster"))
}
for _, node := range nodes.Items {
// find the Node's internal ip address to feed to the Pod
inIP, err := getIP(v1.NodeInternalIP, &node)
framework.ExpectNoError(err)
// target Pod at Node and feed Pod Node's InternalIP
pod := newTestPod(node.Name, inIP)
_, err = pc.Create(pod)
framework.ExpectNoError(err)
}
// In some (most?) scenarios, the test harness doesn't run in the same network as the Pods,
// which means it can't query Pods using their cluster-internal IPs. To get around this,
// we create a Pod in a Node's host network, and have that Pod serve on a specific port of that Node.
// We can then ask this proxy Pod to query the internal endpoints served by the test Pods.
// Find the first schedulable node; masters are marked unschedulable. We don't put the proxy on the master
// because in some (most?) deployments firewall rules don't allow external traffic to hit ports 30000-32767
// on the master, but do allow this on the nodes.
node, err := getSchedulable(nodes.Items)
framework.ExpectNoError(err)
By("creating a no-snat-test-proxy Pod on Node " + node.Name + " port " + strconv.Itoa(testProxyPort) +
" so we can target our test Pods through this Node's ExternalIP")
extIP, err := getIP(v1.NodeExternalIP, node)
framework.ExpectNoError(err)
proxyNodeIP := extIP + ":" + strconv.Itoa(testProxyPort)
_, err = pc.Create(newTestProxyPod(node.Name))
framework.ExpectNoError(err)
By("waiting for all of the no-snat-test pods to be scheduled and running")
err = wait.PollImmediate(10*time.Second, 1*time.Minute, func() (bool, error) {
pods, err := pc.List(metav1.ListOptions{LabelSelector: "no-snat-test"})
if err != nil {
return false, err
}
// check all pods are running
for _, pod := range pods.Items {
if pod.Status.Phase != v1.PodRunning {
if pod.Status.Phase != v1.PodPending {
return false, fmt.Errorf("expected pod to be in phase \"Pending\" or \"Running\"")
}
return false, nil // pod is still pending
}
}
return true, nil // all pods are running
})
framework.ExpectNoError(err)
By("waiting for the no-snat-test-proxy Pod to be scheduled and running")
err = wait.PollImmediate(10*time.Second, 1*time.Minute, func() (bool, error) {
pod, err := pc.Get("no-snat-test-proxy", metav1.GetOptions{})
if err != nil {
return false, err
}
if pod.Status.Phase != v1.PodRunning {
if pod.Status.Phase != v1.PodPending {
return false, fmt.Errorf("expected pod to be in phase \"Pending\" or \"Running\"")
}
return false, nil // pod is still pending
}
return true, nil // pod is running
})
framework.ExpectNoError(err)
By("sending traffic from each pod to the others and checking that SNAT does not occur")
pods, err := pc.List(metav1.ListOptions{LabelSelector: "no-snat-test"})
framework.ExpectNoError(err)
// collect pod IPs
podIPs := []string{}
for _, pod := range pods.Items {
podIPs = append(podIPs, pod.Status.PodIP+":"+strconv.Itoa(testPodPort))
}
// hit the /checknosnat endpoint on each Pod, tell each Pod to check all the other Pods
// this test is O(n^2) but it doesn't matter because we only run this test on small clusters (~3 nodes)
errs := []string{}
client := http.Client{
Timeout: 5 * time.Minute,
}
for _, pip := range podIPs {
ips := []string{}
for _, ip := range podIPs {
if ip == pip {
continue
}
ips = append(ips, ip)
}
// hit /checknosnat on pip, via proxy
resp, err := client.Get(checknosnatURL(proxyNodeIP, pip, ips))
framework.ExpectNoError(err)
// check error code on the response, if 500 record the body, which will describe the error
if resp.StatusCode == 500 {
body, err := ioutil.ReadAll(resp.Body)
framework.ExpectNoError(err)
errs = append(errs, string(body))
}
resp.Body.Close()
}
// report the errors all at the end
if len(errs) > 0 {
str := strings.Join(errs, "\n")
err := fmt.Errorf("/checknosnat failed in the following cases:\n%s", str)
framework.ExpectNoError(err)
}
})
})

368
vendor/k8s.io/kubernetes/test/e2e/network/proxy.go generated vendored Normal file
View File

@ -0,0 +1,368 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// OWNER = sig/network
package network
import (
"fmt"
"math"
"net/http"
"strings"
"sync"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/net"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
// Try all the proxy tests this many times (to catch even rare flakes).
proxyAttempts = 20
// Only print this many characters of the response (to keep the logs
// legible).
maxDisplayBodyLen = 100
// We have seen one of these calls take just over 15 seconds, so putting this at 30.
proxyHTTPCallTimeout = 30 * time.Second
)
var _ = SIGDescribe("Proxy", func() {
version := testapi.Groups[v1.GroupName].GroupVersion().Version
Context("version "+version, func() {
options := framework.FrameworkOptions{
ClientQPS: -1.0,
}
f := framework.NewFramework("proxy", options, nil)
prefix := "/api/" + version
// Port here has to be kept in sync with default kubelet port.
/*
Testname: proxy-prefix-node-logs-port
Description: Ensure that proxy on node logs works with generic top
level prefix proxy and explicit kubelet port.
*/
framework.ConformanceIt("should proxy logs on node with explicit kubelet port ", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", ":10250/logs/") })
/*
Testname: proxy-prefix-node-logs
Description: Ensure that proxy on node logs works with generic top
level prefix proxy.
*/
framework.ConformanceIt("should proxy logs on node ", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", "/logs/") })
It("should proxy to cadvisor", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", ":4194/containers/") })
/*
Testname: proxy-subresource-node-logs-port
Description: Ensure that proxy on node logs works with node proxy
subresource and explicit kubelet port.
*/
framework.ConformanceIt("should proxy logs on node with explicit kubelet port using proxy subresource ", func() { nodeProxyTest(f, prefix+"/nodes/", ":10250/proxy/logs/") })
/*
Testname: proxy-subresource-node-logs
Description: Ensure that proxy on node logs works with node proxy
subresource.
*/
framework.ConformanceIt("should proxy logs on node using proxy subresource ", func() { nodeProxyTest(f, prefix+"/nodes/", "/proxy/logs/") })
It("should proxy to cadvisor using proxy subresource", func() { nodeProxyTest(f, prefix+"/nodes/", ":4194/proxy/containers/") })
// Using the porter image to serve content on multiple ports, access that
// content both through the service proxy and directly through the pod proxy.
/*
Testname: proxy-service-pod
Description: Ensure that proxy through a service and a pod works with
both generic top level prefix proxy and proxy subresource.
*/
framework.ConformanceIt("should proxy through a service and a pod ", func() {
start := time.Now()
labels := map[string]string{"proxy-service-target": "true"}
service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(&v1.Service{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "proxy-service-",
},
Spec: v1.ServiceSpec{
Selector: labels,
Ports: []v1.ServicePort{
{
Name: "portname1",
Port: 80,
TargetPort: intstr.FromString("dest1"),
},
{
Name: "portname2",
Port: 81,
TargetPort: intstr.FromInt(162),
},
{
Name: "tlsportname1",
Port: 443,
TargetPort: intstr.FromString("tlsdest1"),
},
{
Name: "tlsportname2",
Port: 444,
TargetPort: intstr.FromInt(462),
},
},
},
})
Expect(err).NotTo(HaveOccurred())
// Make an RC with a single pod. The 'porter' image is
// a simple server which serves the values of the
// environment variables below.
By("starting an echo server on multiple ports")
pods := []*v1.Pod{}
cfg := testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Image: imageutils.GetE2EImage(imageutils.Porter),
Name: service.Name,
Namespace: f.Namespace.Name,
Replicas: 1,
PollInterval: time.Second,
Env: map[string]string{
"SERVE_PORT_80": `<a href="/rewriteme">test</a>`,
"SERVE_PORT_1080": `<a href="/rewriteme">test</a>`,
"SERVE_PORT_160": "foo",
"SERVE_PORT_162": "bar",
"SERVE_TLS_PORT_443": `<a href="/tlsrewriteme">test</a>`,
"SERVE_TLS_PORT_460": `tls baz`,
"SERVE_TLS_PORT_462": `tls qux`,
},
Ports: map[string]int{
"dest1": 160,
"dest2": 162,
"tlsdest1": 460,
"tlsdest2": 462,
},
ReadinessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Port: intstr.FromInt(80),
},
},
InitialDelaySeconds: 1,
TimeoutSeconds: 5,
PeriodSeconds: 10,
},
Labels: labels,
CreatedPods: &pods,
}
Expect(framework.RunRC(cfg)).NotTo(HaveOccurred())
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, cfg.Name)
Expect(framework.WaitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name)).NotTo(HaveOccurred())
// table constructors
// Try proxying through the service and directly through the pod.
svcProxyURL := func(scheme, port string) string {
return prefix + "/proxy/namespaces/" + f.Namespace.Name + "/services/" + net.JoinSchemeNamePort(scheme, service.Name, port)
}
subresourceServiceProxyURL := func(scheme, port string) string {
return prefix + "/namespaces/" + f.Namespace.Name + "/services/" + net.JoinSchemeNamePort(scheme, service.Name, port) + "/proxy"
}
podProxyURL := func(scheme, port string) string {
return prefix + "/proxy/namespaces/" + f.Namespace.Name + "/pods/" + net.JoinSchemeNamePort(scheme, pods[0].Name, port)
}
subresourcePodProxyURL := func(scheme, port string) string {
return prefix + "/namespaces/" + f.Namespace.Name + "/pods/" + net.JoinSchemeNamePort(scheme, pods[0].Name, port) + "/proxy"
}
// construct the table
expectations := map[string]string{
svcProxyURL("", "portname1") + "/": "foo",
svcProxyURL("", "80") + "/": "foo",
svcProxyURL("", "portname2") + "/": "bar",
svcProxyURL("", "81") + "/": "bar",
svcProxyURL("http", "portname1") + "/": "foo",
svcProxyURL("http", "80") + "/": "foo",
svcProxyURL("http", "portname2") + "/": "bar",
svcProxyURL("http", "81") + "/": "bar",
svcProxyURL("https", "tlsportname1") + "/": "tls baz",
svcProxyURL("https", "443") + "/": "tls baz",
svcProxyURL("https", "tlsportname2") + "/": "tls qux",
svcProxyURL("https", "444") + "/": "tls qux",
subresourceServiceProxyURL("", "portname1") + "/": "foo",
subresourceServiceProxyURL("http", "portname1") + "/": "foo",
subresourceServiceProxyURL("", "portname2") + "/": "bar",
subresourceServiceProxyURL("http", "portname2") + "/": "bar",
subresourceServiceProxyURL("https", "tlsportname1") + "/": "tls baz",
subresourceServiceProxyURL("https", "tlsportname2") + "/": "tls qux",
podProxyURL("", "1080") + "/": `<a href="` + podProxyURL("", "1080") + `/rewriteme">test</a>`,
podProxyURL("", "160") + "/": "foo",
podProxyURL("", "162") + "/": "bar",
podProxyURL("http", "1080") + "/": `<a href="` + podProxyURL("http", "1080") + `/rewriteme">test</a>`,
podProxyURL("http", "160") + "/": "foo",
podProxyURL("http", "162") + "/": "bar",
subresourcePodProxyURL("", "") + "/": `<a href="` + subresourcePodProxyURL("", "") + `/rewriteme">test</a>`,
subresourcePodProxyURL("", "1080") + "/": `<a href="` + subresourcePodProxyURL("", "1080") + `/rewriteme">test</a>`,
subresourcePodProxyURL("http", "1080") + "/": `<a href="` + subresourcePodProxyURL("http", "1080") + `/rewriteme">test</a>`,
subresourcePodProxyURL("", "160") + "/": "foo",
subresourcePodProxyURL("http", "160") + "/": "foo",
subresourcePodProxyURL("", "162") + "/": "bar",
subresourcePodProxyURL("http", "162") + "/": "bar",
subresourcePodProxyURL("https", "443") + "/": `<a href="` + subresourcePodProxyURL("https", "443") + `/tlsrewriteme">test</a>`,
subresourcePodProxyURL("https", "460") + "/": "tls baz",
subresourcePodProxyURL("https", "462") + "/": "tls qux",
// TODO: below entries don't work, but I believe we should make them work.
// podPrefix + ":dest1": "foo",
// podPrefix + ":dest2": "bar",
}
wg := sync.WaitGroup{}
errs := []string{}
errLock := sync.Mutex{}
recordError := func(s string) {
errLock.Lock()
defer errLock.Unlock()
errs = append(errs, s)
}
d := time.Since(start)
framework.Logf("setup took %v, starting test cases", d)
numberTestCases := len(expectations)
totalAttempts := numberTestCases * proxyAttempts
By(fmt.Sprintf("running %v cases, %v attempts per case, %v total attempts", numberTestCases, proxyAttempts, totalAttempts))
for i := 0; i < proxyAttempts; i++ {
wg.Add(numberTestCases)
for path, val := range expectations {
go func(i int, path, val string) {
defer wg.Done()
// this runs the test case
body, status, d, err := doProxy(f, path, i)
if err != nil {
if serr, ok := err.(*errors.StatusError); ok {
recordError(fmt.Sprintf("%v (%v; %v): path %v gave status error: %+v",
i, status, d, path, serr.Status()))
} else {
recordError(fmt.Sprintf("%v: path %v gave error: %v", i, path, err))
}
return
}
if status != http.StatusOK {
recordError(fmt.Sprintf("%v: path %v gave status: %v", i, path, status))
}
if e, a := val, string(body); e != a {
recordError(fmt.Sprintf("%v: path %v: wanted %v, got %v", i, path, e, a))
}
if d > proxyHTTPCallTimeout {
recordError(fmt.Sprintf("%v: path %v took %v > %v", i, path, d, proxyHTTPCallTimeout))
}
}(i, path, val)
}
wg.Wait()
}
if len(errs) != 0 {
body, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).GetLogs(pods[0].Name, &v1.PodLogOptions{}).Do().Raw()
if err != nil {
framework.Logf("Error getting logs for pod %s: %v", pods[0].Name, err)
} else {
framework.Logf("Pod %s has the following error logs: %s", pods[0].Name, body)
}
framework.Failf("%s", strings.Join(errs, "\n"))
}
})
})
})
func doProxy(f *framework.Framework, path string, i int) (body []byte, statusCode int, d time.Duration, err error) {
// About all of the proxy accesses in this file:
// * AbsPath is used because it preserves the trailing '/'.
// * Do().Raw() is used (instead of DoRaw()) because it will turn an
// error from apiserver proxy into an actual error, and there is no
// chance of the things we are talking to being confused for an error
// that apiserver would have emitted.
start := time.Now()
body, err = f.ClientSet.CoreV1().RESTClient().Get().AbsPath(path).Do().StatusCode(&statusCode).Raw()
d = time.Since(start)
if len(body) > 0 {
framework.Logf("(%v) %v: %s (%v; %v)", i, path, truncate(body, maxDisplayBodyLen), statusCode, d)
} else {
framework.Logf("%v: %s (%v; %v)", path, "no body", statusCode, d)
}
return
}
func truncate(b []byte, maxLen int) []byte {
if len(b) <= maxLen-3 {
return b
}
b2 := append([]byte(nil), b[:maxLen-3]...)
b2 = append(b2, '.', '.', '.')
return b2
}
func pickNode(cs clientset.Interface) (string, error) {
// TODO: investigate why it doesn't work on master Node.
nodes := framework.GetReadySchedulableNodesOrDie(cs)
if len(nodes.Items) == 0 {
return "", fmt.Errorf("no nodes exist, can't test node proxy")
}
return nodes.Items[0].Name, nil
}
func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) {
node, err := pickNode(f.ClientSet)
Expect(err).NotTo(HaveOccurred())
// TODO: Change this to verify that all requests succeed, once the issue of
// requests not reaching the kubelet has been debugged.
serviceUnavailableErrors := 0
for i := 0; i < proxyAttempts; i++ {
_, status, d, err := doProxy(f, prefix+node+nodeDest, i)
if status == http.StatusServiceUnavailable {
framework.Logf("Failed proxying node logs due to service unavailable: %v", err)
time.Sleep(time.Second)
serviceUnavailableErrors++
} else {
Expect(err).NotTo(HaveOccurred())
Expect(status).To(Equal(http.StatusOK))
Expect(d).To(BeNumerically("<", proxyHTTPCallTimeout))
}
}
if serviceUnavailableErrors > 0 {
framework.Logf("error: %d requests to proxy node logs failed", serviceUnavailableErrors)
}
maxFailures := int(math.Floor(0.1 * float64(proxyAttempts)))
Expect(serviceUnavailableErrors).To(BeNumerically("<", maxFailures))
}
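// With proxyAttempts == 20, maxFailures comes out to 2, so each node proxy
// test tolerates at most one ServiceUnavailable response before failing.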

1841
vendor/k8s.io/kubernetes/test/e2e/network/service.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

351
vendor/k8s.io/kubernetes/test/e2e/network/service_latency.go generated vendored Normal file
View File

@ -0,0 +1,351 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"fmt"
"sort"
"strings"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
)
type durations []time.Duration
func (d durations) Len() int { return len(d) }
func (d durations) Less(i, j int) bool { return d[i] < d[j] }
func (d durations) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
var _ = SIGDescribe("Service endpoints latency", func() {
f := framework.NewDefaultFramework("svc-latency")
/*
Testname: service-endpoint-latency
Description: Ensure service endpoint's latency is not high
(e.g. p50 < 20 seconds and p99 < 50 seconds). If any call to the
service endpoint fails, the test will also fail.
*/
framework.ConformanceIt("should not be very high ", func() {
const (
// These are very generous criteria. Ideally we will
// get this much lower in the future. See issue
// #10436.
limitMedian = time.Second * 20
limitTail = time.Second * 50
// Numbers chosen to make the test complete in a short amount
// of time. This sample size is not actually large enough to
// reliably measure tails (it may give false positives, but not
// false negatives), but it should catch low hanging fruit.
//
// Note that these are fixed and do not depend on the
// size of the cluster. Setting parallelTrials larger
// distorts the measurements. Perhaps this wouldn't be
// true on HA clusters.
totalTrials = 200
parallelTrials = 15
minSampleSize = 100
)
// Turn off rate limiting--it interferes with our measurements.
oldThrottle := f.ClientSet.CoreV1().RESTClient().GetRateLimiter()
f.ClientSet.CoreV1().RESTClient().(*restclient.RESTClient).Throttle = flowcontrol.NewFakeAlwaysRateLimiter()
defer func() { f.ClientSet.CoreV1().RESTClient().(*restclient.RESTClient).Throttle = oldThrottle }()
failing := sets.NewString()
d, err := runServiceLatencies(f, parallelTrials, totalTrials)
if err != nil {
failing.Insert(fmt.Sprintf("Not all RC/pod/service trials succeeded: %v", err))
}
dSorted := durations(d)
sort.Sort(dSorted)
n := len(dSorted)
if n < minSampleSize {
failing.Insert(fmt.Sprintf("Did not get a good sample size: %v", dSorted))
}
if n < 2 {
failing.Insert("Less than two runs succeeded; aborting.")
framework.Failf("%s", strings.Join(failing.List(), "\n"))
}
percentile := func(p int) time.Duration {
est := n * p / 100
if est >= n {
return dSorted[n-1]
}
return dSorted[est]
}
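// For example, with n == 200 sorted samples, percentile(50) returns
// dSorted[100] and percentile(99) returns dSorted[198]; the est >= n guard
// only comes into play for p == 100.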
framework.Logf("Latencies: %v", dSorted)
p50 := percentile(50)
p90 := percentile(90)
p99 := percentile(99)
framework.Logf("50 %%ile: %v", p50)
framework.Logf("90 %%ile: %v", p90)
framework.Logf("99 %%ile: %v", p99)
framework.Logf("Total sample count: %v", len(dSorted))
if p50 > limitMedian {
failing.Insert("Median latency should be less than " + limitMedian.String())
}
if p99 > limitTail {
failing.Insert("Tail (99 percentile) latency should be less than " + limitTail.String())
}
if failing.Len() > 0 {
errList := strings.Join(failing.List(), "\n")
helpfulInfo := fmt.Sprintf("\n50, 90, 99 percentiles: %v %v %v", p50, p90, p99)
framework.Failf("%s", errList+helpfulInfo)
}
})
})
func runServiceLatencies(f *framework.Framework, inParallel, total int) (output []time.Duration, err error) {
cfg := testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Image: framework.GetPauseImageName(f.ClientSet),
Name: "svc-latency-rc",
Namespace: f.Namespace.Name,
Replicas: 1,
PollInterval: time.Second,
}
if err := framework.RunRC(cfg); err != nil {
return nil, err
}
// Run a single watcher, to reduce the number of API calls we have to
// make; this is to minimize the timing error. It's how kube-proxy
// consumes the endpoints data, so it seems like the right thing to
// test.
endpointQueries := newQuerier()
startEndpointWatcher(f, endpointQueries)
defer close(endpointQueries.stop)
// run one test and throw it away-- this is to make sure that the pod's
// ready status has propagated.
singleServiceLatency(f, cfg.Name, endpointQueries)
// These channels are never closed, and each attempt sends on exactly
// one of these channels, so the sum of the things sent over them will
// be exactly total.
errs := make(chan error, total)
durations := make(chan time.Duration, total)
blocker := make(chan struct{}, inParallel)
for i := 0; i < total; i++ {
go func() {
defer GinkgoRecover()
blocker <- struct{}{}
defer func() { <-blocker }()
if d, err := singleServiceLatency(f, cfg.Name, endpointQueries); err != nil {
errs <- err
} else {
durations <- d
}
}()
}
errCount := 0
for i := 0; i < total; i++ {
select {
case e := <-errs:
framework.Logf("Got error: %v", e)
errCount++
case d := <-durations:
output = append(output, d)
}
}
if errCount != 0 {
return output, fmt.Errorf("got %v errors", errCount)
}
return output, nil
}
type endpointQuery struct {
endpointsName string
endpoints *v1.Endpoints
result chan<- struct{}
}
type endpointQueries struct {
requests map[string]*endpointQuery
stop chan struct{}
requestChan chan *endpointQuery
seenChan chan *v1.Endpoints
}
func newQuerier() *endpointQueries {
eq := &endpointQueries{
requests: map[string]*endpointQuery{},
stop: make(chan struct{}, 100),
requestChan: make(chan *endpointQuery),
seenChan: make(chan *v1.Endpoints, 100),
}
go eq.join()
return eq
}
// join merges the incoming streams of requests and added endpoints. It has
// nice properties like:
// * remembering an endpoint if it happens to arrive before it is requested.
// * closing all outstanding requests (returning nil) if it is stopped.
func (eq *endpointQueries) join() {
defer func() {
// Terminate all pending requests, so that no goroutine will
// block indefinitely.
for _, req := range eq.requests {
if req.result != nil {
close(req.result)
}
}
}()
for {
select {
case <-eq.stop:
return
case req := <-eq.requestChan:
if cur, ok := eq.requests[req.endpointsName]; ok && cur.endpoints != nil {
// We've already gotten the result, so we can
// immediately satisfy this request.
delete(eq.requests, req.endpointsName)
req.endpoints = cur.endpoints
close(req.result)
} else {
// Save this request.
eq.requests[req.endpointsName] = req
}
case got := <-eq.seenChan:
if req, ok := eq.requests[got.Name]; ok {
if req.result != nil {
// Satisfy a request.
delete(eq.requests, got.Name)
req.endpoints = got
close(req.result)
} else {
// We've already recorded a result, but
// haven't gotten the request yet. Only
// keep the first result.
}
} else {
// We haven't gotten the corresponding request
// yet, save this result.
eq.requests[got.Name] = &endpointQuery{
endpoints: got,
}
}
}
}
}
// request blocks until the requested endpoint is seen.
func (eq *endpointQueries) request(endpointsName string) *v1.Endpoints {
result := make(chan struct{})
req := &endpointQuery{
endpointsName: endpointsName,
result: result,
}
eq.requestChan <- req
<-result
return req.endpoints
}
// marks e as added; does not block.
func (eq *endpointQueries) added(e *v1.Endpoints) {
eq.seenChan <- e
}
// blocks until it has finished syncing.
func startEndpointWatcher(f *framework.Framework, q *endpointQueries) {
_, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
obj, err := f.ClientSet.CoreV1().Endpoints(f.Namespace.Name).List(options)
return runtime.Object(obj), err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return f.ClientSet.CoreV1().Endpoints(f.Namespace.Name).Watch(options)
},
},
&v1.Endpoints{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
if e, ok := obj.(*v1.Endpoints); ok {
if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 {
q.added(e)
}
}
},
UpdateFunc: func(old, cur interface{}) {
if e, ok := cur.(*v1.Endpoints); ok {
if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 {
q.added(e)
}
}
},
},
)
go controller.Run(q.stop)
// Wait for the controller to sync, so that we don't count any warm-up time.
for !controller.HasSynced() {
time.Sleep(100 * time.Millisecond)
}
}
func singleServiceLatency(f *framework.Framework, name string, q *endpointQueries) (time.Duration, error) {
// Make a service that points to that pod.
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "latency-svc-",
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: 80}},
Selector: map[string]string{"name": name},
Type: v1.ServiceTypeClusterIP,
SessionAffinity: v1.ServiceAffinityNone,
},
}
startTime := time.Now()
gotSvc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(svc)
if err != nil {
return 0, err
}
framework.Logf("Created: %v", gotSvc.Name)
if e := q.request(gotSvc.Name); e == nil {
return 0, fmt.Errorf("Never got a result for endpoint %v", gotSvc.Name)
}
stopTime := time.Now()
d := stopTime.Sub(startTime)
framework.Logf("Got endpoints: %v [%v]", gotSvc.Name, d)
return d, nil
}

247
vendor/k8s.io/kubernetes/test/e2e/network/serviceloadbalancers.go generated vendored Normal file
View File

@ -0,0 +1,247 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"fmt"
"net/http"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/manifest"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// getLoadBalancerControllers returns a list of LBCTesters.
func getLoadBalancerControllers(client clientset.Interface) []LBCTester {
return []LBCTester{
&haproxyControllerTester{
name: "haproxy",
cfg: "test/e2e/testing-manifests/serviceloadbalancer/haproxyrc.yaml",
client: client,
},
}
}
// getIngManagers returns a list of ingManagers.
func getIngManagers(client clientset.Interface) []*ingManager {
return []*ingManager{
{
name: "netexec",
rcCfgPaths: []string{"test/e2e/testing-manifests/serviceloadbalancer/netexecrc.yaml"},
svcCfgPaths: []string{"test/e2e/testing-manifests/serviceloadbalancer/netexecsvc.yaml"},
svcNames: []string{},
client: client,
},
}
}
// LBCTester is an interface used to test loadbalancer controllers.
type LBCTester interface {
// start starts the loadbalancer controller in the given namespace
start(namespace string) error
// lookup returns the address (ip/hostname) associated with ingressKey
lookup(ingressKey string) string
// stop stops the loadbalancer controller
stop() error
// name returns the name of the loadbalancer
getName() string
}
// haproxyControllerTester implements LBCTester for bare metal haproxy LBs.
type haproxyControllerTester struct {
client clientset.Interface
cfg string
rcName string
rcNamespace string
name string
address []string
}
func (h *haproxyControllerTester) getName() string {
return h.name
}
func (h *haproxyControllerTester) start(namespace string) (err error) {
// Create a replication controller with the given configuration.
framework.Logf("Parsing rc from %v", h.cfg)
rc, err := manifest.RcFromManifest(h.cfg)
Expect(err).NotTo(HaveOccurred())
rc.Namespace = namespace
rc.Spec.Template.Labels["name"] = rc.Name
// Add the --namespace arg.
// TODO: Remove this when we have proper namespace support.
for i, c := range rc.Spec.Template.Spec.Containers {
rc.Spec.Template.Spec.Containers[i].Args = append(
c.Args, fmt.Sprintf("--namespace=%v", namespace))
framework.Logf("Container args %+v", rc.Spec.Template.Spec.Containers[i].Args)
}
rc, err = h.client.CoreV1().ReplicationControllers(rc.Namespace).Create(rc)
if err != nil {
return
}
if err = framework.WaitForControlledPodsRunning(h.client, namespace, rc.Name, api.Kind("ReplicationController")); err != nil {
return
}
h.rcName = rc.Name
h.rcNamespace = rc.Namespace
// Find the pods of the rc we just created.
labelSelector := labels.SelectorFromSet(
labels.Set(map[string]string{"name": h.rcName}))
options := metav1.ListOptions{LabelSelector: labelSelector.String()}
pods, err := h.client.CoreV1().Pods(h.rcNamespace).List(options)
if err != nil {
return err
}
// Find the external addresses of the nodes the pods are running on.
for _, p := range pods.Items {
wait.Poll(1*time.Second, framework.ServiceRespondingTimeout, func() (bool, error) {
address, err := framework.GetHostExternalAddress(h.client, &p)
if err != nil {
framework.Logf("%v", err)
return false, nil
}
h.address = append(h.address, address)
return true, nil
})
}
if len(h.address) == 0 {
return fmt.Errorf("No external ips found for loadbalancer %v", h.getName())
}
return nil
}
func (h *haproxyControllerTester) stop() error {
return h.client.CoreV1().ReplicationControllers(h.rcNamespace).Delete(h.rcName, nil)
}
func (h *haproxyControllerTester) lookup(ingressKey string) string {
// The address of a service is the address of the lb/servicename, currently.
return fmt.Sprintf("http://%v/%v", h.address[0], ingressKey)
}
// ingManager starts an rc and the associated service.
type ingManager struct {
rcCfgPaths []string
svcCfgPaths []string
ingCfgPath string
name string
namespace string
client clientset.Interface
svcNames []string
}
func (s *ingManager) getName() string {
return s.name
}
func (s *ingManager) start(namespace string) (err error) {
// Create rcs
for _, rcPath := range s.rcCfgPaths {
framework.Logf("Parsing rc from %v", rcPath)
var rc *v1.ReplicationController
rc, err = manifest.RcFromManifest(rcPath)
Expect(err).NotTo(HaveOccurred())
rc.Namespace = namespace
rc.Spec.Template.Labels["name"] = rc.Name
rc, err = s.client.CoreV1().ReplicationControllers(rc.Namespace).Create(rc)
if err != nil {
return
}
if err = framework.WaitForControlledPodsRunning(s.client, rc.Namespace, rc.Name, api.Kind("ReplicationController")); err != nil {
return
}
}
// Create services.
// Note that it's up to the caller to make sure the service actually matches
// the pods of the rc.
for _, svcPath := range s.svcCfgPaths {
framework.Logf("Parsing service from %v", svcPath)
var svc *v1.Service
svc, err = manifest.SvcFromManifest(svcPath)
Expect(err).NotTo(HaveOccurred())
svc.Namespace = namespace
svc, err = s.client.CoreV1().Services(svc.Namespace).Create(svc)
if err != nil {
return
}
// TODO: This is short term till we have an Ingress.
s.svcNames = append(s.svcNames, svc.Name)
}
s.name = s.svcNames[0]
s.namespace = namespace
return nil
}
func (s *ingManager) test(path string) error {
url := fmt.Sprintf("%v/hostName", path)
httpClient := &http.Client{}
return wait.Poll(1*time.Second, framework.ServiceRespondingTimeout, func() (bool, error) {
body, err := framework.SimpleGET(httpClient, url, "")
if err != nil {
framework.Logf("%v\n%v\n%v", url, body, err)
return false, nil
}
return true, nil
})
}
var _ = SIGDescribe("ServiceLoadBalancer [Feature:ServiceLoadBalancer]", func() {
// These variables are initialized after framework's beforeEach.
var ns string
var client clientset.Interface
f := framework.NewDefaultFramework("servicelb")
BeforeEach(func() {
client = f.ClientSet
ns = f.Namespace.Name
})
It("should support simple GET on Ingress ips", func() {
for _, t := range getLoadBalancerControllers(client) {
By(fmt.Sprintf("Starting loadbalancer controller %v in namespace %v", t.getName(), ns))
Expect(t.start(ns)).NotTo(HaveOccurred())
for _, s := range getIngManagers(client) {
By(fmt.Sprintf("Starting ingress manager %v in namespace %v", s.getName(), ns))
Expect(s.start(ns)).NotTo(HaveOccurred())
for _, sName := range s.svcNames {
path := t.lookup(sName)
framework.Logf("Testing path %v", path)
Expect(s.test(path)).NotTo(HaveOccurred())
}
}
Expect(t.stop()).NotTo(HaveOccurred())
}
})
})

106
vendor/k8s.io/kubernetes/test/e2e/network/util_iperf.go generated vendored Normal file
View File

@ -0,0 +1,106 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
// Tests network performance using iperf or other containers.
import (
"bytes"
"encoding/json"
"fmt"
"strconv"
"strings"
"k8s.io/kubernetes/test/e2e/framework"
)
type IPerfResults struct {
BandwidthMap map[string]int64
}
// IPerfResult models a single iperf CSV record, e.g.:
// 20160314154239,172.17.0.3,34152,172.17.0.2,5001,3,0.0-10.0,33843707904,27074774092
type IPerfResult struct {
date string // field 1 in the csv
cli string // field 2 in the csv
cliPort int64 // ...
server string
servPort int64
id string
interval string
transferBits int64
bandwidthBits int64
}
// Add adds a new result to the Results struct.
func (i *IPerfResults) Add(ipr *IPerfResult) {
if i.BandwidthMap == nil {
i.BandwidthMap = map[string]int64{}
}
i.BandwidthMap[ipr.cli] = ipr.bandwidthBits
}
// ToTSV exports an easily readable tab-delimited format of all IPerfResults.
func (i *IPerfResults) ToTSV() string {
if len(i.BandwidthMap) < 1 {
framework.Logf("Warning: no data in bandwidth map")
}
var buffer bytes.Buffer
for node, bandwidth := range i.BandwidthMap {
asJSON, _ := json.Marshal(node)
buffer.WriteString("\t " + string(asJSON) + "\t " + fmt.Sprintf("%E", float64(bandwidth)) + "\n")
}
return buffer.String()
}
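// With the sample record above, for instance, the entry for client
// "172.17.0.3" renders as the row:
//
//	 "172.17.0.3"	 2.707477E+10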
// NewIPerf parses an IPerf CSV output line into an IPerfResult.
func NewIPerf(csvLine string) *IPerfResult {
csvLine = strings.Trim(csvLine, "\n")
slice := StrSlice(strings.Split(csvLine, ","))
if len(slice) != 9 {
framework.Failf("Incorrect fields in the output: %v (%v out of 9)", slice, len(slice))
}
i := IPerfResult{}
i.date = slice.get(0)
i.cli = slice.get(1)
i.cliPort = intOrFail("client port", slice.get(2))
i.server = slice.get(3)
i.servPort = intOrFail("server port", slice.get(4))
i.id = slice.get(5)
i.interval = slice.get(6)
i.transferBits = intOrFail("transfer port", slice.get(7))
i.bandwidthBits = intOrFail("bandwidth port", slice.get(8))
return &i
}
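// Parsing the sample record above yields, for example, cli == "172.17.0.3",
// servPort == 5001, interval == "0.0-10.0", and bandwidthBits == 27074774092.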
// StrSlice is a []string with a bounds-safe getter.
type StrSlice []string
func (s StrSlice) get(i int) string {
if i >= 0 && i < len(s) {
return s[i]
}
return ""
}
// intOrFail is a convenience function for parsing integers.
func intOrFail(debugName string, rawValue string) int64 {
value, err := strconv.ParseInt(rawValue, 10, 64)
if err != nil {
framework.Failf("Failed parsing value %v from the string '%v' as an integer", debugName, rawValue)
}
return value
}