Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions


@ -27,21 +27,43 @@ go_library(
"proxy.go",
"service.go",
"service_latency.go",
"serviceloadbalancers.go",
"util_iperf.go",
],
importpath = "k8s.io/kubernetes/test/e2e/network",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/cloudprovider/providers/gce/cloud:go_default_library",
"//pkg/controller/endpoint:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/master/ports:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/networking/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/e2e/framework/ingress:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",
"//test/e2e/network/scale:go_default_library",
"//test/images/net/nat:go_default_library",
"//test/utils:go_default_library",
@ -50,28 +72,6 @@ go_library(
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/google.golang.org/api/compute/v0.alpha:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/networking/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)


@ -37,8 +37,9 @@ var _ = SIGDescribe("DNS", func() {
f := framework.NewDefaultFramework("dns")
/*
Testname: dns-for-clusters
Description: Make sure that DNS can resolve the names of clusters.
Release : v1.9
Testname: DNS, cluster
Description: When a Pod is created, the pod MUST be able to resolve cluster dns entries such as kubernetes.default via DNS and /etc/hosts.
*/
framework.ConformanceIt("should provide DNS for the cluster ", func() {
// All the names we need to be able to resolve.
@ -46,17 +47,17 @@ var _ = SIGDescribe("DNS", func() {
namesToResolve := []string{
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster.local",
fmt.Sprintf("kubernetes.default.svc.%s", framework.TestContext.ClusterDNSDomain),
}
// Added due to #8512. This is critical for GCE and GKE deployments.
if framework.ProviderIs("gce", "gke") {
namesToResolve = append(namesToResolve, "google.com")
namesToResolve = append(namesToResolve, "metadata")
}
hostFQDN := fmt.Sprintf("%s.%s.%s.svc.cluster.local", dnsTestPodHostName, dnsTestServiceName, f.Namespace.Name)
hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", dnsTestPodHostName, dnsTestServiceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
hostEntries := []string{hostFQDN, dnsTestPodHostName}
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostEntries, "", "wheezy", f.Namespace.Name)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostEntries, "", "jessie", f.Namespace.Name)
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostEntries, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostEntries, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
@ -67,8 +68,9 @@ var _ = SIGDescribe("DNS", func() {
})
/*
Testname: dns-for-services
Description: Make sure that DNS can resolve the names of services.
Release : v1.9
Testname: DNS, services
Description: When a headless service is created, the service MUST be able to resolve all the required service endpoints. When the service is created, any pod in the same namespace must be able to resolve the service by all of the expected DNS names.
*/
framework.ConformanceIt("should provide DNS for services ", func() {
// Create a test headless service.
@ -78,16 +80,18 @@ var _ = SIGDescribe("DNS", func() {
}
headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName)
defer func() {
By("deleting the test headless service")
defer GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
}()
regularService := framework.CreateServiceSpec("test-service-2", "", false, testServiceSelector)
regularServiceName := "test-service-2"
regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector)
regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create regular service: %s", regularServiceName)
defer func() {
By("deleting the test service")
defer GinkgoRecover()
@ -105,8 +109,8 @@ var _ = SIGDescribe("DNS", func() {
fmt.Sprintf("_http._tcp.%s.%s.svc", regularService.Name, f.Namespace.Name),
}
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "wheezy", f.Namespace.Name)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "jessie", f.Namespace.Name)
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
@ -128,18 +132,19 @@ var _ = SIGDescribe("DNS", func() {
podHostname := "dns-querier-2"
headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", serviceName)
defer func() {
By("deleting the test headless service")
defer GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
}()
hostFQDN := fmt.Sprintf("%s.%s.%s.svc.cluster.local", podHostname, serviceName, f.Namespace.Name)
hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", podHostname, serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
hostNames := []string{hostFQDN, podHostname}
namesToResolve := []string{hostFQDN}
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostNames, "", "wheezy", f.Namespace.Name)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostNames, "", "jessie", f.Namespace.Name)
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostNames, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostNames, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
@ -159,14 +164,14 @@ var _ = SIGDescribe("DNS", func() {
serviceName := "dns-test-service-3"
externalNameService := framework.CreateServiceSpec(serviceName, "foo.example.com", false, nil)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create ExternalName service: %s", serviceName)
defer func() {
By("deleting the test externalName service")
defer GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameService.Name, nil)
}()
hostFQDN := fmt.Sprintf("%s.%s.svc.cluster.local", serviceName, f.Namespace.Name)
hostFQDN := fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
wheezyProbeCmd, wheezyFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy")
jessieProbeCmd, jessieFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "jessie")
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
@ -183,7 +188,7 @@ var _ = SIGDescribe("DNS", func() {
_, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
s.Spec.ExternalName = "bar.example.com"
})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to change externalName of service: %s", serviceName)
wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy")
jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "jessie")
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
@ -200,10 +205,10 @@ var _ = SIGDescribe("DNS", func() {
_, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeClusterIP
s.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: "TCP"},
{Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
}
})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to change service type to ClusterIP for service: %s", serviceName)
wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "A", "wheezy")
jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "A", "jessie")
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
@ -214,7 +219,7 @@ var _ = SIGDescribe("DNS", func() {
pod3 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
svc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Get(externalNameService.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get service: %s", externalNameService.Name)
validateTargetedProbeOutput(f, pod3, []string{wheezyFileName, jessieFileName}, svc.Spec.ClusterIP)
})
@ -230,7 +235,7 @@ var _ = SIGDescribe("DNS", func() {
testDNSNameFull: testInjectedIP,
})
testServerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testServerPod)
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s", testServerPod.Name)
Expect(err).NotTo(HaveOccurred(), "failed to create pod: %s", testServerPod.Name)
framework.Logf("Created pod %v", testServerPod)
defer func() {
framework.Logf("Deleting pod %s...", testServerPod.Name)
@ -261,7 +266,7 @@ var _ = SIGDescribe("DNS", func() {
},
}
testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testUtilsPod)
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s", testUtilsPod.Name)
Expect(err).NotTo(HaveOccurred(), "failed to create pod: %s", testUtilsPod.Name)
framework.Logf("Created pod %v", testUtilsPod)
defer func() {
framework.Logf("Deleting pod %s...", testUtilsPod.Name)

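The hunks above swap the hard-coded cluster.local suffix for framework.TestContext.ClusterDNSDomain wherever the test builds service and pod FQDNs, and thread the same value into createProbeCommand. A minimal standalone sketch of that naming scheme, with illustrative helper names and namespace that are not part of the e2e framework:

package main

import "fmt"

// serviceFQDN mirrors the <service>.<namespace>.svc.<clusterDomain> form used in the test above.
func serviceFQDN(service, namespace, clusterDomain string) string {
    return fmt.Sprintf("%s.%s.svc.%s", service, namespace, clusterDomain)
}

// podHostFQDN mirrors the <hostname>.<service>.<namespace>.svc.<clusterDomain> form used for
// pods that set both a hostname and a subdomain.
func podHostFQDN(hostname, service, namespace, clusterDomain string) string {
    return fmt.Sprintf("%s.%s.%s.svc.%s", hostname, service, namespace, clusterDomain)
}

func main() {
    // "cluster.local" is only the common default; the test now reads the real value from
    // framework.TestContext.ClusterDNSDomain instead of assuming it.
    domain := "cluster.local"
    fmt.Println(serviceFQDN("dns-test-service", "e2e-dns-1234", domain))
    fmt.Println(podHostFQDN("dns-querier-1", "dns-test-service", "e2e-dns-1234", domain))
}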

@ -65,8 +65,9 @@ func (t *dnsTestCommon) init() {
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"}))
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := t.f.ClientSet.CoreV1().Pods("kube-system").List(options)
Expect(err).NotTo(HaveOccurred())
namespace := "kube-system"
pods, err := t.f.ClientSet.CoreV1().Pods(namespace).List(options)
Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", namespace)
Expect(len(pods.Items)).Should(BeNumerically(">=", 1))
t.dnsPod = &pods.Items[0]
@ -111,14 +112,15 @@ func (t *dnsTestCommon) runDig(dnsName, target string) []string {
cmd = append(cmd, "@"+t.dnsPod.Status.PodIP)
case "kube-dns":
cmd = append(cmd, "@"+t.dnsPod.Status.PodIP, "-p", "10053")
case "ptr-record":
cmd = append(cmd, "-x")
case "cluster-dns":
case "cluster-dns-ipv6":
cmd = append(cmd, "AAAA")
break
default:
panic(fmt.Errorf("invalid target: " + target))
}
if strings.HasSuffix(dnsName, "in-addr.arpa") || strings.HasSuffix(dnsName, "in-addr.arpa.") {
cmd = append(cmd, []string{"-t", "ptr"}...)
}
cmd = append(cmd, dnsName)
stdout, stderr, err := t.f.ExecWithOptions(framework.ExecOptions{
@ -155,23 +157,23 @@ func (t *dnsTestCommon) setConfigMap(cm *v1.ConfigMap) {
}.AsSelector().String(),
}
cmList, err := t.c.CoreV1().ConfigMaps(t.ns).List(options)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to list ConfigMaps in namespace: %s", t.ns)
if len(cmList.Items) == 0 {
By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm))
_, err := t.c.CoreV1().ConfigMaps(t.ns).Create(cm)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create ConfigMap (%s:%s) %+v", t.ns, t.name, *cm)
} else {
By(fmt.Sprintf("Updating the ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm))
_, err := t.c.CoreV1().ConfigMaps(t.ns).Update(cm)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to update ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm)
}
}
func (t *dnsTestCommon) fetchDNSConfigMapData() map[string]string {
if t.name == "coredns" {
pcm, err := t.c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(t.name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get DNS ConfigMap: %s", t.name)
return pcm.Data
}
return nil
@ -190,7 +192,7 @@ func (t *dnsTestCommon) deleteConfigMap() {
By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name))
t.cm = nil
err := t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete config map: %s", t.name)
}
func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
@ -213,7 +215,7 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
Image: imageutils.GetE2EImage(imageutils.Dnsutils),
Command: []string{"sleep", "10000"},
Ports: []v1.ContainerPort{
{ContainerPort: servicePort, Protocol: "TCP"},
{ContainerPort: servicePort, Protocol: v1.ProtocolTCP},
},
},
},
@ -222,9 +224,9 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
var err error
t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.utilPod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create pod: %v", t.utilPod)
framework.Logf("Created pod %v", t.utilPod)
Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(HaveOccurred())
Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(HaveOccurred(), "pod failed to start running: %v", t.utilPod)
t.utilService = &v1.Service{
TypeMeta: metav1.TypeMeta{
@ -238,7 +240,7 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
Selector: map[string]string{"app": baseName},
Ports: []v1.ServicePort{
{
Protocol: "TCP",
Protocol: v1.ProtocolTCP,
Port: servicePort,
TargetPort: intstr.FromInt(servicePort),
},
@ -247,14 +249,14 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
}
t.utilService, err = t.c.CoreV1().Services(t.f.Namespace.Name).Create(t.utilService)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create service: %s/%s", t.f.Namespace.Name, t.utilService.ObjectMeta.Name)
framework.Logf("Created service %v", t.utilService)
}
func (t *dnsTestCommon) deleteUtilPod() {
podClient := t.c.CoreV1().Pods(t.f.Namespace.Name)
if err := podClient.Delete(t.utilPod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Logf("Delete of pod %v:%v failed: %v",
framework.Logf("Delete of pod %v/%v failed: %v",
t.utilPod.Namespace, t.utilPod.Name, err)
}
}
@ -270,7 +272,7 @@ func (t *dnsTestCommon) deleteCoreDNSPods() {
for _, pod := range pods.Items {
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s", pod.Name)
}
}
@ -286,7 +288,7 @@ func generateDNSServerPod(aRecords map[string]string) *v1.Pod {
Containers: []v1.Container{
{
Name: "dns",
Image: imageutils.GetE2EImage(imageutils.DNSMasq),
Image: imageutils.GetE2EImage(imageutils.Dnsutils),
Command: []string{
"/usr/sbin/dnsmasq",
"-u", "root",
@ -313,20 +315,20 @@ func (t *dnsTestCommon) createDNSPodFromObj(pod *v1.Pod) {
var err error
t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.dnsServerPod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create pod: %v", t.dnsServerPod)
framework.Logf("Created pod %v", t.dnsServerPod)
Expect(t.f.WaitForPodRunning(t.dnsServerPod.Name)).NotTo(HaveOccurred())
Expect(t.f.WaitForPodRunning(t.dnsServerPod.Name)).NotTo(HaveOccurred(), "pod failed to start running: %v", t.dnsServerPod)
t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Get(
t.dnsServerPod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get pod: %s", t.dnsServerPod.Name)
}
func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) {
t.createDNSPodFromObj(generateDNSServerPod(aRecords))
}
func (t *dnsTestCommon) createDNSServerWithPtrRecord() {
func (t *dnsTestCommon) createDNSServerWithPtrRecord(isIPv6 bool) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@ -338,13 +340,12 @@ func (t *dnsTestCommon) createDNSServerWithPtrRecord() {
Containers: []v1.Container{
{
Name: "dns",
Image: imageutils.GetE2EImage(imageutils.DNSMasq),
Image: imageutils.GetE2EImage(imageutils.Dnsutils),
Command: []string{
"/usr/sbin/dnsmasq",
"-u", "root",
"-k",
"--log-facility", "-",
"--host-record=my.test,192.0.2.123",
"-q",
},
},
@ -353,13 +354,23 @@ func (t *dnsTestCommon) createDNSServerWithPtrRecord() {
},
}
if isIPv6 {
pod.Spec.Containers[0].Command = append(
pod.Spec.Containers[0].Command,
fmt.Sprintf("--host-record=my.test,2001:db8::29"))
} else {
pod.Spec.Containers[0].Command = append(
pod.Spec.Containers[0].Command,
fmt.Sprintf("--host-record=my.test,192.0.2.123"))
}
t.createDNSPodFromObj(pod)
}
func (t *dnsTestCommon) deleteDNSServerPod() {
podClient := t.c.CoreV1().Pods(t.f.Namespace.Name)
if err := podClient.Delete(t.dnsServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Logf("Delete of pod %v:%v failed: %v",
framework.Logf("Delete of pod %v/%v failed: %v",
t.utilPod.Namespace, t.dnsServerPod.Name, err)
}
}
@ -433,7 +444,7 @@ func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd, podHostName, servic
return dnsPod
}
func createProbeCommand(namesToResolve []string, hostEntries []string, ptrLookupIP string, fileNamePrefix, namespace string) (string, []string) {
func createProbeCommand(namesToResolve []string, hostEntries []string, ptrLookupIP string, fileNamePrefix, namespace, dnsDomain string) (string, []string) {
fileNames := make([]string, 0, len(namesToResolve)*2)
probeCmd := "for i in `seq 1 600`; do "
for _, name := range namesToResolve {
@ -446,10 +457,10 @@ func createProbeCommand(namesToResolve []string, hostEntries []string, ptrLookup
}
fileName := fmt.Sprintf("%s_udp@%s", fileNamePrefix, name)
fileNames = append(fileNames, fileName)
probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search %s %s)" && echo OK > /results/%s;`, name, lookup, fileName)
probeCmd += fmt.Sprintf(`check="$$(dig +notcp +noall +answer +search %s %s)" && test -n "$$check" && echo OK > /results/%s;`, name, lookup, fileName)
fileName = fmt.Sprintf("%s_tcp@%s", fileNamePrefix, name)
fileNames = append(fileNames, fileName)
probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search %s %s)" && echo OK > /results/%s;`, name, lookup, fileName)
probeCmd += fmt.Sprintf(`check="$$(dig +tcp +noall +answer +search %s %s)" && test -n "$$check" && echo OK > /results/%s;`, name, lookup, fileName)
}
for _, name := range hostEntries {
@ -460,9 +471,9 @@ func createProbeCommand(namesToResolve []string, hostEntries []string, ptrLookup
podARecByUDPFileName := fmt.Sprintf("%s_udp@PodARecord", fileNamePrefix)
podARecByTCPFileName := fmt.Sprintf("%s_tcp@PodARecord", fileNamePrefix)
probeCmd += fmt.Sprintf(`podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".%s.pod.cluster.local"}');`, namespace)
probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search $${podARec} A)" && echo OK > /results/%s;`, podARecByUDPFileName)
probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search $${podARec} A)" && echo OK > /results/%s;`, podARecByTCPFileName)
probeCmd += fmt.Sprintf(`podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".%s.pod.%s"}');`, namespace, dnsDomain)
probeCmd += fmt.Sprintf(`check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/%s;`, podARecByUDPFileName)
probeCmd += fmt.Sprintf(`check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/%s;`, podARecByTCPFileName)
fileNames = append(fileNames, podARecByUDPFileName)
fileNames = append(fileNames, podARecByTCPFileName)
@ -470,8 +481,8 @@ func createProbeCommand(namesToResolve []string, hostEntries []string, ptrLookup
ptrLookup := fmt.Sprintf("%s.in-addr.arpa.", strings.Join(reverseArray(strings.Split(ptrLookupIP, ".")), "."))
ptrRecByUDPFileName := fmt.Sprintf("%s_udp@PTR", ptrLookupIP)
ptrRecByTCPFileName := fmt.Sprintf("%s_tcp@PTR", ptrLookupIP)
probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search %s PTR)" && echo OK > /results/%s;`, ptrLookup, ptrRecByUDPFileName)
probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search %s PTR)" && echo OK > /results/%s;`, ptrLookup, ptrRecByTCPFileName)
probeCmd += fmt.Sprintf(`check="$$(dig +notcp +noall +answer +search %s PTR)" && test -n "$$check" && echo OK > /results/%s;`, ptrLookup, ptrRecByUDPFileName)
probeCmd += fmt.Sprintf(`check="$$(dig +tcp +noall +answer +search %s PTR)" && test -n "$$check" && echo OK > /results/%s;`, ptrLookup, ptrRecByTCPFileName)
fileNames = append(fileNames, ptrRecByUDPFileName)
fileNames = append(fileNames, ptrRecByTCPFileName)
}
@ -494,7 +505,7 @@ func assertFilesExist(fileNames []string, fileDir string, pod *v1.Pod, client cl
func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface, check bool, expected string) {
var failed []string
framework.ExpectNoError(wait.Poll(time.Second*10, time.Second*600, func() (bool, error) {
framework.ExpectNoError(wait.PollImmediate(time.Second*5, time.Second*600, func() (bool, error) {
failed = []string{}
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
@ -512,20 +523,20 @@ func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client
if err != nil {
if ctx.Err() != nil {
framework.Failf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
framework.Failf("Unable to read %s from pod %s/%s: %v", fileName, pod.Namespace, pod.Name, err)
} else {
framework.Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
framework.Logf("Unable to read %s from pod %s/%s: %v", fileName, pod.Namespace, pod.Name, err)
}
failed = append(failed, fileName)
} else if check && strings.TrimSpace(string(contents)) != expected {
framework.Logf("File %s from pod %s contains '%s' instead of '%s'", fileName, pod.Name, string(contents), expected)
framework.Logf("File %s from pod %s/%s contains '%s' instead of '%s'", fileName, pod.Namespace, pod.Name, string(contents), expected)
failed = append(failed, fileName)
}
}
if len(failed) == 0 {
return true, nil
}
framework.Logf("Lookups using %s failed for: %v\n", pod.Name, failed)
framework.Logf("Lookups using %s/%s failed for: %v\n", pod.Namespace, pod.Name, failed)
return false, nil
}))
Expect(len(failed)).To(Equal(0))
@ -540,7 +551,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
}()
if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create %s pod: %v", pod.Name, err)
framework.Failf("Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
@ -548,7 +559,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
By("retrieving the pod")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get pod %s: %v", pod.Name, err)
framework.Failf("Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
// Try to find results for each expected name.
By("looking for the results for each expected name from probers")
@ -556,7 +567,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
// TODO: probe from the host, too.
framework.Logf("DNS probes using %s succeeded\n", pod.Name)
framework.Logf("DNS probes using %s/%s succeeded\n", pod.Namespace, pod.Name)
}
func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {
@ -568,7 +579,7 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
}()
if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create %s pod: %v", pod.Name, err)
framework.Failf("Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
@ -576,7 +587,7 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
By("retrieving the pod")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get pod %s: %v", pod.Name, err)
framework.Failf("Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
// Try to find the expected value for each expected name.
By("looking for the results for each expected name from probers")

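Two recurring changes in the hunks above: result polling moves from wait.Poll to wait.PollImmediate so the first check runs without an initial delay, and each dig probe now stores its output in a shell variable before testing it. Since the exit status of a plain assignment is the exit status of its command substitution, the OK marker is only written when dig both exits successfully and returns a non-empty answer. The sketch below rebuilds one probe fragment in that shape; it is a simplified illustration that uses a single $ where the real test writes $$, the escape Kubernetes applies when expanding $(VAR) references in container commands.

package main

import (
    "fmt"
    "strings"
)

// digProbe builds one probe fragment in the style used by createProbeCommand above: dig's
// output is captured in a variable, so the OK marker is written only if the assignment
// (and therefore dig itself) succeeded and the answer is non-empty.
func digProbe(useTCP bool, name, recordType, fileName string) string {
    proto := "+notcp"
    if useTCP {
        proto = "+tcp"
    }
    return fmt.Sprintf(`check="$(dig %s +noall +answer +search %s %s)" && test -n "$check" && echo OK > /results/%s;`,
        proto, name, recordType, fileName)
}

func main() {
    fragments := []string{
        digProbe(false, "kubernetes.default", "A", "wheezy_udp@kubernetes.default"),
        digProbe(true, "kubernetes.default", "A", "wheezy_tcp@kubernetes.default"),
    }
    // The e2e test wraps fragments like these in a shell retry loop with a sleep between rounds.
    fmt.Println("for i in `seq 1 600`; do " + strings.Join(fragments, "") + "sleep 1; done")
}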

@ -41,9 +41,9 @@ var (
moreForeverTestTimeout = 2 * 60 * time.Second
)
var _ = SIGDescribe("DNS configMap federations", func() {
var _ = SIGDescribe("DNS configMap federations [Feature:Federation]", func() {
t := &dnsNameserverTest{dnsTestCommon: newDnsTestCommon()}
t := &dnsFederationsConfigMapTest{dnsTestCommon: newDnsTestCommon()}
It("should be able to change federation configuration [Slow][Serial]", func() {
t.c = t.f.ClientSet
@ -57,61 +57,115 @@ func (t *dnsFederationsConfigMapTest) run() {
defer t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil)
t.createUtilPodLabel("e2e-dns-configmap")
defer t.deleteUtilPod()
originalConfigMapData := t.fetchDNSConfigMapData()
defer t.restoreDNSConfigMap(originalConfigMapData)
t.validate()
t.validate(framework.TestContext.ClusterDNSDomain)
t.labels = []string{"abc", "ghi"}
valid1 := map[string]string{"federations": t.labels[0] + "=def"}
valid1m := map[string]string{t.labels[0]: "def"}
valid2 := map[string]string{"federations": t.labels[1] + "=xyz"}
valid2m := map[string]string{t.labels[1]: "xyz"}
invalid := map[string]string{"federations": "invalid.map=xyz"}
if t.name == "coredns" {
t.labels = []string{"abc", "ghi"}
valid1 := map[string]string{
"Corefile": fmt.Sprintf(`.:53 {
kubernetes %v in-addr.arpa ip6.arpa {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
}
federation %v {
abc def.com
}
proxy . /etc/resolv.conf
}`, framework.TestContext.ClusterDNSDomain, framework.TestContext.ClusterDNSDomain)}
valid1m := map[string]string{t.labels[0]: "def.com"}
By("empty -> valid1")
t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
t.validate()
valid2 := map[string]string{
"Corefile": fmt.Sprintf(`:53 {
kubernetes %v in-addr.arpa ip6.arpa {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
}
federation %v {
ghi xyz.com
}
proxy . /etc/resolv.conf
}`, framework.TestContext.ClusterDNSDomain, framework.TestContext.ClusterDNSDomain)}
valid2m := map[string]string{t.labels[1]: "xyz.com"}
By("valid1 -> valid2")
t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true)
t.validate()
By("default -> valid1")
t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
t.deleteCoreDNSPods()
t.validate(framework.TestContext.ClusterDNSDomain)
By("valid2 -> invalid")
t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false)
t.validate()
By("valid1 -> valid2")
t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true)
t.deleteCoreDNSPods()
t.validate(framework.TestContext.ClusterDNSDomain)
By("invalid -> valid1")
t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
t.validate()
By("valid2 -> default")
t.setConfigMap(&v1.ConfigMap{Data: originalConfigMapData}, nil, false)
t.deleteCoreDNSPods()
t.validate(framework.TestContext.ClusterDNSDomain)
By("valid1 -> deleted")
t.deleteConfigMap()
t.validate()
t.restoreDNSConfigMap(originalConfigMapData)
By("deleted -> invalid")
t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false)
t.validate()
} else {
t.labels = []string{"abc", "ghi"}
valid1 := map[string]string{"federations": t.labels[0] + "=def"}
valid1m := map[string]string{t.labels[0]: "def"}
valid2 := map[string]string{"federations": t.labels[1] + "=xyz"}
valid2m := map[string]string{t.labels[1]: "xyz"}
invalid := map[string]string{"federations": "invalid.map=xyz"}
By("empty -> valid1")
t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
t.validate(framework.TestContext.ClusterDNSDomain)
By("valid1 -> valid2")
t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true)
t.validate(framework.TestContext.ClusterDNSDomain)
By("valid2 -> invalid")
t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false)
t.validate(framework.TestContext.ClusterDNSDomain)
By("invalid -> valid1")
t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
t.validate(framework.TestContext.ClusterDNSDomain)
By("valid1 -> deleted")
t.deleteConfigMap()
t.validate(framework.TestContext.ClusterDNSDomain)
By("deleted -> invalid")
t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false)
t.validate(framework.TestContext.ClusterDNSDomain)
}
}
func (t *dnsFederationsConfigMapTest) validate() {
func (t *dnsFederationsConfigMapTest) validate(dnsDomain string) {
federations := t.fedMap
if len(federations) == 0 {
By(fmt.Sprintf("Validating federation labels %v do not exist", t.labels))
for _, label := range t.labels {
var federationDNS = fmt.Sprintf("e2e-dns-configmap.%s.%s.svc.cluster.local.",
t.f.Namespace.Name, label)
var federationDNS = fmt.Sprintf("e2e-dns-configmap.%s.%s.svc.%s.",
t.f.Namespace.Name, label, framework.TestContext.ClusterDNSDomain)
predicate := func(actual []string) bool {
return len(actual) == 0
}
t.checkDNSRecord(federationDNS, predicate, wait.ForeverTestTimeout)
t.checkDNSRecordFrom(federationDNS, predicate, "cluster-dns", wait.ForeverTestTimeout)
}
} else {
for label := range federations {
var federationDNS = fmt.Sprintf("%s.%s.%s.svc.cluster.local.",
t.utilService.ObjectMeta.Name, t.f.Namespace.Name, label)
var localDNS = fmt.Sprintf("%s.%s.svc.cluster.local.",
t.utilService.ObjectMeta.Name, t.f.Namespace.Name)
var federationDNS = fmt.Sprintf("%s.%s.%s.svc.%s.",
t.utilService.ObjectMeta.Name, t.f.Namespace.Name, label, framework.TestContext.ClusterDNSDomain)
var localDNS = fmt.Sprintf("%s.%s.svc.%s.",
t.utilService.ObjectMeta.Name, t.f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
if t.name == "coredns" {
localDNS = t.utilService.Spec.ClusterIP
}
// Check local mapping. Checking a remote mapping requires
// creating an arbitrary DNS record which is not possible at the
// moment.
@ -124,12 +178,14 @@ func (t *dnsFederationsConfigMapTest) validate() {
}
return false
}
t.checkDNSRecord(federationDNS, predicate, wait.ForeverTestTimeout)
t.checkDNSRecordFrom(federationDNS, predicate, "cluster-dns", wait.ForeverTestTimeout)
}
}
}
func (t *dnsFederationsConfigMapTest) setConfigMap(cm *v1.ConfigMap, fedMap map[string]string, isValid bool) {
t.fedMap = nil
if isValid {
t.fedMap = fedMap
}
@ -146,7 +202,7 @@ type dnsNameserverTest struct {
dnsTestCommon
}
func (t *dnsNameserverTest) run() {
func (t *dnsNameserverTest) run(isIPv6 bool) {
t.init()
t.createUtilPodLabel("e2e-dns-configmap")
@ -154,17 +210,25 @@ func (t *dnsNameserverTest) run() {
originalConfigMapData := t.fetchDNSConfigMapData()
defer t.restoreDNSConfigMap(originalConfigMapData)
t.createDNSServer(map[string]string{
"abc.acme.local": "1.1.1.1",
"def.acme.local": "2.2.2.2",
"widget.local": "3.3.3.3",
})
if isIPv6 {
t.createDNSServer(map[string]string{
"abc.acme.local": "2606:4700:4700::1111",
"def.acme.local": "2606:4700:4700::2222",
"widget.local": "2606:4700:4700::3333",
})
} else {
t.createDNSServer(map[string]string{
"abc.acme.local": "1.1.1.1",
"def.acme.local": "2.2.2.2",
"widget.local": "3.3.3.3",
})
}
defer t.deleteDNSServerPod()
if t.name == "coredns" {
t.setConfigMap(&v1.ConfigMap{Data: map[string]string{
"Corefile": fmt.Sprintf(`.:53 {
kubernetes cluster.local in-addr.arpa ip6.arpa {
kubernetes %v in-addr.arpa ip6.arpa {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
@ -173,7 +237,7 @@ func (t *dnsNameserverTest) run() {
}
acme.local:53 {
proxy . %v
}`, t.dnsServerPod.Status.PodIP, t.dnsServerPod.Status.PodIP),
}`, framework.TestContext.ClusterDNSDomain, t.dnsServerPod.Status.PodIP, t.dnsServerPod.Status.PodIP),
}})
t.deleteCoreDNSPods()
@ -184,21 +248,39 @@ func (t *dnsNameserverTest) run() {
}})
}
t.checkDNSRecordFrom(
"abc.acme.local",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "1.1.1.1" },
"cluster-dns",
moreForeverTestTimeout)
t.checkDNSRecordFrom(
"def.acme.local",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "2.2.2.2" },
"cluster-dns",
moreForeverTestTimeout)
t.checkDNSRecordFrom(
"widget.local",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "3.3.3.3" },
"cluster-dns",
moreForeverTestTimeout)
if isIPv6 {
t.checkDNSRecordFrom(
"abc.acme.local",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "2606:4700:4700::1111" },
"cluster-dns-ipv6",
moreForeverTestTimeout)
t.checkDNSRecordFrom(
"def.acme.local",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "2606:4700:4700::2222" },
"cluster-dns-ipv6",
moreForeverTestTimeout)
t.checkDNSRecordFrom(
"widget.local",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "2606:4700:4700::3333" },
"cluster-dns-ipv6",
moreForeverTestTimeout)
} else {
t.checkDNSRecordFrom(
"abc.acme.local",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "1.1.1.1" },
"cluster-dns",
moreForeverTestTimeout)
t.checkDNSRecordFrom(
"def.acme.local",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "2.2.2.2" },
"cluster-dns",
moreForeverTestTimeout)
t.checkDNSRecordFrom(
"widget.local",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "3.3.3.3" },
"cluster-dns",
moreForeverTestTimeout)
}
t.restoreDNSConfigMap(originalConfigMapData)
// Wait for the deleted ConfigMap to take effect, otherwise the
@ -214,7 +296,7 @@ type dnsPtrFwdTest struct {
dnsTestCommon
}
func (t *dnsPtrFwdTest) run() {
func (t *dnsPtrFwdTest) run(isIPv6 bool) {
t.init()
t.createUtilPodLabel("e2e-dns-configmap")
@ -222,26 +304,34 @@ func (t *dnsPtrFwdTest) run() {
originalConfigMapData := t.fetchDNSConfigMapData()
defer t.restoreDNSConfigMap(originalConfigMapData)
t.createDNSServerWithPtrRecord()
t.createDNSServerWithPtrRecord(isIPv6)
defer t.deleteDNSServerPod()
// Should still be able to lookup public nameserver without explicit upstream nameserver set.
t.checkDNSRecordFrom(
"8.8.8.8.in-addr.arpa",
func(actual []string) bool { return len(actual) == 1 && actual[0] == googleDnsHostname+"." },
"cluster-dns",
moreForeverTestTimeout)
if isIPv6 {
t.checkDNSRecordFrom(
"2001:4860:4860::8888",
func(actual []string) bool { return len(actual) == 1 && actual[0] == googleDnsHostname+"." },
"ptr-record",
moreForeverTestTimeout)
} else {
t.checkDNSRecordFrom(
"8.8.8.8",
func(actual []string) bool { return len(actual) == 1 && actual[0] == googleDnsHostname+"." },
"ptr-record",
moreForeverTestTimeout)
}
if t.name == "coredns" {
t.setConfigMap(&v1.ConfigMap{Data: map[string]string{
"Corefile": fmt.Sprintf(`.:53 {
kubernetes cluster.local in-addr.arpa ip6.arpa {
kubernetes %v in-addr.arpa ip6.arpa {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
}
proxy . %v
}`, t.dnsServerPod.Status.PodIP),
}`, framework.TestContext.ClusterDNSDomain, t.dnsServerPod.Status.PodIP),
}})
t.deleteCoreDNSPods()
@ -251,25 +341,41 @@ func (t *dnsPtrFwdTest) run() {
}})
}
t.checkDNSRecordFrom(
"123.2.0.192.in-addr.arpa",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "my.test." },
"cluster-dns",
moreForeverTestTimeout)
if isIPv6 {
t.checkDNSRecordFrom(
"2001:db8::29",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "my.test." },
"ptr-record",
moreForeverTestTimeout)
t.restoreDNSConfigMap(originalConfigMapData)
t.checkDNSRecordFrom(
"123.2.0.192.in-addr.arpa",
func(actual []string) bool { return len(actual) == 0 },
"cluster-dns",
moreForeverTestTimeout)
t.restoreDNSConfigMap(originalConfigMapData)
t.checkDNSRecordFrom(
"2001:db8::29",
func(actual []string) bool { return len(actual) == 0 },
"ptr-record",
moreForeverTestTimeout)
} else {
t.checkDNSRecordFrom(
"192.0.2.123",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "my.test." },
"ptr-record",
moreForeverTestTimeout)
t.restoreDNSConfigMap(originalConfigMapData)
t.checkDNSRecordFrom(
"192.0.2.123",
func(actual []string) bool { return len(actual) == 0 },
"ptr-record",
moreForeverTestTimeout)
}
}
type dnsExternalNameTest struct {
dnsTestCommon
}
func (t *dnsExternalNameTest) run() {
func (t *dnsExternalNameTest) run(isIPv6 bool) {
t.init()
t.createUtilPodLabel("e2e-dns-configmap")
@ -278,9 +384,15 @@ func (t *dnsExternalNameTest) run() {
defer t.restoreDNSConfigMap(originalConfigMapData)
fooHostname := "foo.example.com"
t.createDNSServer(map[string]string{
fooHostname: "192.0.2.123",
})
if isIPv6 {
t.createDNSServer(map[string]string{
fooHostname: "2001:db8::29",
})
} else {
t.createDNSServer(map[string]string{
fooHostname: "192.0.2.123",
})
}
defer t.deleteDNSServerPod()
f := t.f
@ -301,24 +413,34 @@ func (t *dnsExternalNameTest) run() {
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameServiceLocal.Name, nil)
}()
t.checkDNSRecordFrom(
fmt.Sprintf("%s.%s.svc.cluster.local", serviceName, f.Namespace.Name),
func(actual []string) bool {
return len(actual) >= 1 && actual[0] == googleDnsHostname+"."
},
"cluster-dns",
moreForeverTestTimeout)
if isIPv6 {
t.checkDNSRecordFrom(
fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain),
func(actual []string) bool {
return len(actual) >= 1 && actual[0] == googleDnsHostname+"."
},
"cluster-dns-ipv6",
moreForeverTestTimeout)
} else {
t.checkDNSRecordFrom(
fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain),
func(actual []string) bool {
return len(actual) >= 1 && actual[0] == googleDnsHostname+"."
},
"cluster-dns",
moreForeverTestTimeout)
}
if t.name == "coredns" {
t.setConfigMap(&v1.ConfigMap{Data: map[string]string{
"Corefile": fmt.Sprintf(`.:53 {
kubernetes cluster.local in-addr.arpa ip6.arpa {
kubernetes %v in-addr.arpa ip6.arpa {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
}
proxy . %v
}`, t.dnsServerPod.Status.PodIP),
}`, framework.TestContext.ClusterDNSDomain, t.dnsServerPod.Status.PodIP),
}})
t.deleteCoreDNSPods()
@ -327,26 +449,35 @@ func (t *dnsExternalNameTest) run() {
"upstreamNameservers": fmt.Sprintf(`["%v"]`, t.dnsServerPod.Status.PodIP),
}})
}
t.checkDNSRecordFrom(
fmt.Sprintf("%s.%s.svc.cluster.local", serviceNameLocal, f.Namespace.Name),
func(actual []string) bool {
return len(actual) == 2 && actual[0] == fooHostname+"." && actual[1] == "192.0.2.123"
},
"cluster-dns",
moreForeverTestTimeout)
if isIPv6 {
t.checkDNSRecordFrom(
fmt.Sprintf("%s.%s.svc.%s", serviceNameLocal, f.Namespace.Name, framework.TestContext.ClusterDNSDomain),
func(actual []string) bool {
return len(actual) >= 1 && actual[0] == fooHostname+"." && actual[1] == "2001:db8::29"
},
"cluster-dns-ipv6",
moreForeverTestTimeout)
} else {
t.checkDNSRecordFrom(
fmt.Sprintf("%s.%s.svc.%s", serviceNameLocal, f.Namespace.Name, framework.TestContext.ClusterDNSDomain),
func(actual []string) bool {
return len(actual) == 2 && actual[0] == fooHostname+"." && actual[1] == "192.0.2.123"
},
"cluster-dns",
moreForeverTestTimeout)
}
t.restoreDNSConfigMap(originalConfigMapData)
}
var _ = SIGDescribe("DNS configMap nameserver", func() {
var _ = SIGDescribe("DNS configMap nameserver [IPv4]", func() {
Context("Change stubDomain", func() {
nsTest := &dnsNameserverTest{dnsTestCommon: newDnsTestCommon()}
It("should be able to change stubDomain configuration [Slow][Serial]", func() {
nsTest.c = nsTest.f.ClientSet
nsTest.run()
nsTest.run(false)
})
})
@ -355,7 +486,7 @@ var _ = SIGDescribe("DNS configMap nameserver", func() {
It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func() {
fwdTest.c = fwdTest.f.ClientSet
fwdTest.run()
fwdTest.run(false)
})
})
@ -364,7 +495,37 @@ var _ = SIGDescribe("DNS configMap nameserver", func() {
It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() {
externalNameTest.c = externalNameTest.f.ClientSet
externalNameTest.run()
externalNameTest.run(false)
})
})
})
var _ = SIGDescribe("DNS configMap nameserver [Feature:Networking-IPv6]", func() {
Context("Change stubDomain", func() {
nsTest := &dnsNameserverTest{dnsTestCommon: newDnsTestCommon()}
It("should be able to change stubDomain configuration [Slow][Serial]", func() {
nsTest.c = nsTest.f.ClientSet
nsTest.run(true)
})
})
Context("Forward PTR lookup", func() {
fwdTest := &dnsPtrFwdTest{dnsTestCommon: newDnsTestCommon()}
It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func() {
fwdTest.c = fwdTest.f.ClientSet
fwdTest.run(true)
})
})
Context("Forward external name lookup", func() {
externalNameTest := &dnsExternalNameTest{dnsTestCommon: newDnsTestCommon()}
It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() {
externalNameTest.c = externalNameTest.f.ClientSet
externalNameTest.run(true)
})
})
})
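The configmap tests above render the CoreDNS Corefile with fmt.Sprintf so the kubernetes plugin is bound to framework.TestContext.ClusterDNSDomain and a stub domain is proxied to the test DNS server pod. A self-contained sketch of that template follows; the domain, stub domain, and upstream address are placeholder values, and the proxy directive matches the CoreDNS version these tests target (newer releases use forward instead).

package main

import "fmt"

// corefileWithStubDomain mirrors the pattern in the hunks above: the kubernetes plugin serves
// the configured cluster domain rather than a hard-coded cluster.local, and one stub domain
// is proxied to a test nameserver.
func corefileWithStubDomain(clusterDomain, stubDomain, upstream string) string {
    return fmt.Sprintf(`.:53 {
    kubernetes %s in-addr.arpa ip6.arpa {
        pods insecure
        upstream
        fallthrough in-addr.arpa ip6.arpa
    }
    proxy . /etc/resolv.conf
}
%s:53 {
    proxy . %s
}`, clusterDomain, stubDomain, upstream)
}

func main() {
    // Placeholder values: the tests take the domain from framework.TestContext.ClusterDNSDomain
    // and the upstream address from the DNS server pod's Status.PodIP.
    fmt.Println(corefileWithStubDomain("cluster.local", "acme.local", "10.0.0.10"))
}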


@ -17,6 +17,7 @@ limitations under the License.
package network
import (
"context"
"fmt"
"strconv"
"time"
@ -34,10 +35,11 @@ import (
const (
parallelCreateServiceWorkers = 1
maxServicesPerCluster = 10000
maxServicesPerNamespace = 5000
checkServicePercent = 0.05
)
var _ = SIGDescribe("[Feature:PerformanceDNS]", func() {
var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
f := framework.NewDefaultFramework("performancedns")
BeforeEach(func() {
@ -50,13 +52,22 @@ var _ = SIGDescribe("[Feature:PerformanceDNS]", func() {
// answers dns for service - creates the maximum number of services, and then checks the dns record for one
It("Should answer DNS query for maximum number of services per cluster", func() {
services := generateServicesInNamespace(f.Namespace.Name, maxServicesPerCluster)
// get integer ceiling of maxServicesPerCluster / maxServicesPerNamespace
numNs := (maxServicesPerCluster + maxServicesPerNamespace - 1) / maxServicesPerNamespace
var namespaces []string
for i := 0; i < numNs; i++ {
ns, _ := f.CreateNamespace(f.BaseName, nil)
namespaces = append(namespaces, ns.Name)
}
services := generateServicesInNamespaces(namespaces, maxServicesPerCluster)
createService := func(i int) {
defer GinkgoRecover()
framework.ExpectNoError(testutils.CreateServiceWithRetries(f.ClientSet, f.Namespace.Name, services[i]))
framework.ExpectNoError(testutils.CreateServiceWithRetries(f.ClientSet, services[i].Namespace, services[i]))
}
framework.Logf("Creating %v test services", maxServicesPerCluster)
workqueue.Parallelize(parallelCreateServiceWorkers, len(services), createService)
workqueue.ParallelizeUntil(context.TODO(), parallelCreateServiceWorkers, len(services), createService)
dnsTest := dnsTestCommon{
f: f,
c: f.ClientSet,
@ -72,7 +83,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS]", func() {
s := services[i]
svc, err := f.ClientSet.CoreV1().Services(s.Namespace).Get(s.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
qname := fmt.Sprintf("%v.%v.svc.cluster.local", s.Name, s.Namespace)
qname := fmt.Sprintf("%v.%v.svc.%v", s.Name, s.Namespace, framework.TestContext.ClusterDNSDomain)
framework.Logf("Querying %v expecting %v", qname, svc.Spec.ClusterIP)
dnsTest.checkDNSRecordFrom(
qname,
@ -86,13 +97,13 @@ var _ = SIGDescribe("[Feature:PerformanceDNS]", func() {
})
})
func generateServicesInNamespace(namespace string, num int) []*v1.Service {
func generateServicesInNamespaces(namespaces []string, num int) []*v1.Service {
services := make([]*v1.Service, num)
for i := 0; i < num; i++ {
services[i] = &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "svc-" + strconv.Itoa(i),
Namespace: namespace,
Namespace: namespaces[i%len(namespaces)],
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{

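The scale test above now spreads its services across namespaces: the namespace count is the integer ceiling of maxServicesPerCluster / maxServicesPerNamespace, services are assigned round-robin by index, and creation goes through workqueue.ParallelizeUntil rather than the older Parallelize helper. A small sketch of the arithmetic, with made-up namespace names standing in for f.CreateNamespace:

package main

import "fmt"

// spreadAcrossNamespaces shows the arithmetic used above: the number of namespaces is the
// integer ceiling of total/perNamespace, computed as (total + perNamespace - 1) / perNamespace,
// and items are then assigned round-robin with i % len(namespaces).
func spreadAcrossNamespaces(total, perNamespace int) map[string]int {
    numNs := (total + perNamespace - 1) / perNamespace
    namespaces := make([]string, numNs)
    for i := range namespaces {
        namespaces[i] = fmt.Sprintf("ns-%d", i) // illustrative names; the test calls f.CreateNamespace
    }
    counts := map[string]int{}
    for i := 0; i < total; i++ {
        counts[namespaces[i%len(namespaces)]]++
    }
    return counts
}

func main() {
    // With 10000 services and at most 5000 per namespace, two namespaces get 5000 services each.
    fmt.Println(spreadAcrossNamespaces(10000, 5000))
}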

@ -57,7 +57,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
It("should create pod that uses dns", func() {
mkpath := func(file string) string {
return filepath.Join(framework.TestContext.RepoRoot, "examples/cluster-dns", file)
return filepath.Join(os.Getenv("GOPATH"), "src/k8s.io/examples/staging/cluster-dns", file)
}
// contrary to the example, this test does not use contexts, for simplicity
@ -81,8 +81,9 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
namespaces := []*v1.Namespace{nil, nil}
for i := range namespaces {
var err error
namespaces[i], err = f.CreateNamespace(fmt.Sprintf("dnsexample%d", i), nil)
Expect(err).NotTo(HaveOccurred())
namespaceName := fmt.Sprintf("dnsexample%d", i)
namespaces[i], err = f.CreateNamespace(namespaceName, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)
}
for _, ns := range namespaces {
@ -104,7 +105,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName}))
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns.Name).List(options)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", ns.Name)
err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods)
Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond")
framework.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
@ -134,7 +135,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
_, err = framework.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDns}, "ok", dnsReadyTimeout)
Expect(err).NotTo(HaveOccurred(), "waiting for output from pod exec")
updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, "dns-backend.development.svc.cluster.local", fmt.Sprintf("dns-backend.%s.svc.cluster.local", namespaces[0].Name))
updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, fmt.Sprintf("dns-backend.development.svc.%s", framework.TestContext.ClusterDNSDomain), fmt.Sprintf("dns-backend.%s.svc.%s", namespaces[0].Name, framework.TestContext.ClusterDNSDomain))
// create a pod in each namespace
for _, ns := range namespaces {
@ -151,7 +152,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
// wait for pods to print their result
for _, ns := range namespaces {
_, err := framework.LookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, framework.PodStartTimeout)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "pod %s failed to print result in logs", frontendPodName)
}
})
})
@ -163,10 +164,10 @@ func getNsCmdFlag(ns *v1.Namespace) string {
// pass enough context with the 'old' parameter so that it replaces what you really intended.
func prepareResourceWithReplacedString(inputFile, old, new string) string {
f, err := os.Open(inputFile)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to open file: %s", inputFile)
defer f.Close()
data, err := ioutil.ReadAll(f)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to read from file: %s", inputFile)
podYaml := strings.Replace(string(data), old, new, 1)
return podYaml
}
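prepareResourceWithReplacedString relies on strings.Replace with a count of 1, which is why the comment above asks callers to pass enough surrounding context in old: only the first match is rewritten, so a longer, fully qualified old string pins the replacement to the intended occurrence. A minimal illustration with made-up manifest content and namespace:

package main

import (
    "fmt"
    "strings"
)

func main() {
    manifest := "value: dns-backend.development.svc.cluster.local\n"

    // The cluster domain is hard-coded here for the example; the test reads it from
    // framework.TestContext.ClusterDNSDomain.
    clusterDomain := "cluster.local"
    oldName := fmt.Sprintf("dns-backend.development.svc.%s", clusterDomain)
    newName := fmt.Sprintf("dns-backend.%s.svc.%s", "dnsexample0-abcde", clusterDomain)

    // count = 1: only the first (and here, only) occurrence is replaced.
    fmt.Print(strings.Replace(manifest, oldName, newName, 1))
}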


@ -18,15 +18,17 @@ package network
import (
"fmt"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -38,13 +40,16 @@ var _ = SIGDescribe("Firewall rule", func() {
var cs clientset.Interface
var cloudConfig framework.CloudConfig
var gceCloud *gcecloud.GCECloud
var gceCloud *gcecloud.Cloud
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce")
var err error
cs = f.ClientSet
cloudConfig = framework.TestContext.CloudConfig
gceCloud = cloudConfig.Provider.(*gcecloud.GCECloud)
gceCloud, err = gce.GetGCECloud()
Expect(err).NotTo(HaveOccurred())
})
// This test takes around 6 minutes to run
@ -55,7 +60,7 @@ var _ = SIGDescribe("Firewall rule", func() {
serviceName := "firewall-test-loadbalancer"
By("Getting cluster ID")
clusterID, err := framework.GetClusterID(cs)
clusterID, err := gce.GetClusterID(cs)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Got cluster ID: %v", clusterID)
@ -70,7 +75,7 @@ var _ = SIGDescribe("Firewall rule", func() {
By("Creating a LoadBalancer type service with ExternalTrafficPolicy=Global")
svc := jig.CreateLoadBalancerService(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) {
svc.Spec.Ports = []v1.ServicePort{{Protocol: "TCP", Port: framework.FirewallTestHttpPort}}
svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: gce.FirewallTestHttpPort}}
svc.Spec.LoadBalancerSourceRanges = firewallTestSourceRanges
})
defer func() {
@ -80,23 +85,23 @@ var _ = SIGDescribe("Firewall rule", func() {
})
Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
By("Waiting for the local traffic health check firewall rule to be deleted")
localHCFwName := framework.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.GetLoadBalancerName(svc), false)
_, err := framework.WaitForFirewallRule(gceCloud, localHCFwName, false, framework.LoadBalancerCleanupTimeout)
localHCFwName := gce.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false)
_, err := gce.WaitForFirewallRule(gceCloud, localHCFwName, false, framework.LoadBalancerCleanupTimeout)
Expect(err).NotTo(HaveOccurred())
}()
svcExternalIP := svc.Status.LoadBalancer.Ingress[0].IP
By("Checking if service's firewall rule is correct")
lbFw := framework.ConstructFirewallForLBService(svc, cloudConfig.NodeTag)
lbFw := gce.ConstructFirewallForLBService(svc, cloudConfig.NodeTag)
fw, err := gceCloud.GetFirewall(lbFw.Name)
Expect(err).NotTo(HaveOccurred())
Expect(framework.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
Expect(gce.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
By("Checking if service's nodes health check firewall rule is correct")
nodesHCFw := framework.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, true)
nodesHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, true)
fw, err = gceCloud.GetFirewall(nodesHCFw.Name)
Expect(err).NotTo(HaveOccurred())
Expect(framework.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
Expect(gce.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
// OnlyLocal service is needed to examine which exact nodes the requests are being forwarded to by the Load Balancer on GCE
By("Updating LoadBalancer service to ExternalTrafficPolicy=Local")
@ -105,19 +110,19 @@ var _ = SIGDescribe("Firewall rule", func() {
})
By("Waiting for the nodes health check firewall rule to be deleted")
_, err = framework.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, framework.LoadBalancerCleanupTimeout)
_, err = gce.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, framework.LoadBalancerCleanupTimeout)
Expect(err).NotTo(HaveOccurred())
By("Waiting for the correct local traffic health check firewall rule to be created")
localHCFw := framework.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false)
fw, err = framework.WaitForFirewallRule(gceCloud, localHCFw.Name, true, framework.LoadBalancerCreateTimeoutDefault)
localHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false)
fw, err = gce.WaitForFirewallRule(gceCloud, localHCFw.Name, true, framework.LoadBalancerCreateTimeoutDefault)
Expect(err).NotTo(HaveOccurred())
Expect(framework.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
Expect(gce.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
By(fmt.Sprintf("Creating netexec pods on at most %v nodes", framework.MaxNodesForEndpointsTests))
for i, nodeName := range nodesNames {
podName := fmt.Sprintf("netexec%v", i)
jig.LaunchNetexecPodOnNode(f, nodeName, podName, framework.FirewallTestHttpPort, framework.FirewallTestUdpPort, true)
jig.LaunchNetexecPodOnNode(f, nodeName, podName, gce.FirewallTestHttpPort, gce.FirewallTestUdpPort, true)
defer func() {
framework.Logf("Cleaning up the netexec pod: %v", podName)
Expect(cs.CoreV1().Pods(ns).Delete(podName, nil)).NotTo(HaveOccurred())
@ -126,7 +131,7 @@ var _ = SIGDescribe("Firewall rule", func() {
// Send requests from outside of the cluster because internal traffic is whitelisted
By("Accessing the external service ip from outside, all non-master nodes should be reached")
Expect(framework.TestHitNodesFromOutside(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred())
Expect(framework.TestHitNodesFromOutside(svcExternalIP, gce.FirewallTestHttpPort, gce.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred())
// Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster
// by removing the tag on one VM and making sure it doesn't get any traffic. This is an imperfect
@ -140,17 +145,17 @@ var _ = SIGDescribe("Firewall rule", func() {
if zoneInLabel, ok := nodeList.Items[0].Labels[kubeletapis.LabelZoneFailureDomain]; ok {
zone = zoneInLabel
}
removedTags := framework.SetInstanceTags(cloudConfig, nodesNames[0], zone, []string{})
removedTags := gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, []string{})
defer func() {
By("Adding tags back to the node and wait till the traffic is recovered")
nodesSet.Insert(nodesNames[0])
framework.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags)
gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags)
// Make sure traffic is recovered before exit
Expect(framework.TestHitNodesFromOutside(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred())
Expect(framework.TestHitNodesFromOutside(svcExternalIP, gce.FirewallTestHttpPort, gce.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred())
}()
By("Accessing serivce through the external ip and examine got no response from the node without tags")
Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet, 15)).NotTo(HaveOccurred())
Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, gce.FirewallTestHttpPort, gce.FirewallTimeoutDefault, nodesSet, 15)).NotTo(HaveOccurred())
})
It("should have correct firewall rules for e2e cluster", func() {
@ -160,25 +165,35 @@ var _ = SIGDescribe("Firewall rule", func() {
}
By("Checking if e2e firewall rules are correct")
for _, expFw := range framework.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) {
for _, expFw := range gce.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) {
fw, err := gceCloud.GetFirewall(expFw.Name)
Expect(err).NotTo(HaveOccurred())
Expect(framework.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
Expect(gce.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
}
By("Checking well known ports on master and nodes are not exposed externally")
nodeAddrs := framework.NodeAddresses(nodes, v1.NodeExternalIP)
Expect(len(nodeAddrs)).NotTo(BeZero())
masterAddr := framework.GetMasterAddress(cs)
flag, _ := framework.TestNotReachableHTTPTimeout(masterAddr, ports.InsecureKubeControllerManagerPort, framework.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(masterAddr, ports.SchedulerPort, framework.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletPort, framework.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletReadOnlyPort, framework.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.ProxyStatusPort, framework.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
if len(nodeAddrs) == 0 {
framework.Failf("did not find any node addresses")
}
masterAddresses := framework.GetAllMasterAddresses(cs)
for _, masterAddress := range masterAddresses {
assertNotReachableHTTPTimeout(masterAddress, ports.InsecureKubeControllerManagerPort, gce.FirewallTestTcpTimeout)
assertNotReachableHTTPTimeout(masterAddress, ports.InsecureSchedulerPort, gce.FirewallTestTcpTimeout)
}
assertNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletPort, gce.FirewallTestTcpTimeout)
assertNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletReadOnlyPort, gce.FirewallTestTcpTimeout)
assertNotReachableHTTPTimeout(nodeAddrs[0], ports.ProxyStatusPort, gce.FirewallTestTcpTimeout)
})
})
func assertNotReachableHTTPTimeout(ip string, port int, timeout time.Duration) {
unreachable, err := framework.TestNotReachableHTTPTimeout(ip, port, timeout)
if err != nil {
framework.Failf("Unexpected error checking for reachability of %s:%d: %v", ip, port, err)
}
if !unreachable {
framework.Failf("Was unexpectedly able to reach %s:%d", ip, port)
}
}
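For context, the well-known ports probed above correspond to fixed defaults from the ports package. A table-driven sketch of the same reachability checks follows; the numeric values are assumptions about the defaults of this Kubernetes era, not values taken from this diff.

// Sketch only: port numbers below are assumed defaults, not taken from this diff.
// "addr" stands for a master or node external address collected as in the test above.
var wellKnownPorts = []struct {
	name string
	port int
}{
	{"insecure kube-controller-manager", 10252},
	{"insecure kube-scheduler", 10251},
	{"kubelet", 10250},
	{"kubelet read-only", 10255},
	{"kube-proxy status", 10249},
}

func assertWellKnownPortsUnreachable(addr string) {
	for _, p := range wellKnownPorts {
		// Reuses the helper defined above; each port must be filtered externally.
		assertNotReachableHTTPTimeout(addr, p.port, gce.FirewallTestTcpTimeout)
	}
}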

View File

@ -17,6 +17,7 @@ limitations under the License.
package network
import (
"encoding/json"
"fmt"
"net/http"
"path/filepath"
@ -25,6 +26,7 @@ import (
compute "google.golang.org/api/compute/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
@ -34,8 +36,9 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/ingress"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -45,14 +48,14 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
defer GinkgoRecover()
var (
ns string
jig *framework.IngressTestJig
conformanceTests []framework.IngressConformanceTests
jig *ingress.IngressTestJig
conformanceTests []ingress.IngressConformanceTests
cloudConfig framework.CloudConfig
)
f := framework.NewDefaultFramework("ingress")
BeforeEach(func() {
jig = framework.NewIngressTestJig(f.ClientSet)
jig = ingress.NewIngressTestJig(f.ClientSet)
ns = f.Namespace.Name
cloudConfig = framework.TestContext.CloudConfig
@ -75,13 +78,13 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// Slow by design ~10m for each "It" block dominated by loadbalancer setup time
// TODO: write similar tests for nginx, haproxy and AWS Ingress.
Describe("GCE [Slow] [Feature:Ingress]", func() {
var gceController *framework.GCEIngressController
var gceController *gce.GCEIngressController
// Platform specific setup
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
By("Initializing gce controller")
gceController = &framework.GCEIngressController{
gceController = &gce.GCEIngressController{
Ns: ns,
Client: jig.Client,
Cloud: framework.TestContext.CloudConfig,
@ -107,7 +110,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
})
It("should conform to Ingress spec", func() {
conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{})
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{})
for _, t := range conformanceTests {
By(t.EntryLog)
t.Execute()
@ -127,13 +130,13 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
nodeTags := []string{cloudConfig.NodeTag}
if framework.TestContext.Provider != "gce" {
// nodeTags would be different in GKE.
nodeTags = framework.GetNodeTags(jig.Client, cloudConfig)
nodeTags = gce.GetNodeTags(jig.Client, cloudConfig)
}
expFw := jig.ConstructFirewallForIngress(gceController, nodeTags)
expFw := jig.ConstructFirewallForIngress(gceController.GetFirewallRuleName(), nodeTags)
// Passed the last argument as `true` to verify that the backend ports are a subset
// of the allowed ports in the firewall rule, given there may be other existing
// ingress resources and backends we are not aware of.
Expect(framework.VerifyFirewallRule(fw, expFw, gceController.Cloud.Network, true)).NotTo(HaveOccurred())
Expect(gce.VerifyFirewallRule(fw, expFw, gceController.Cloud.Network, true)).NotTo(HaveOccurred())
// TODO: uncomment the restart test once we have a way to synchronize
// and know that the controller has resumed watching. If we delete
@ -210,7 +213,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
Expect(err).NotTo(HaveOccurred())
By("Creating a basic HTTP ingress and wait for it to come up")
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http"), ns, nil, nil)
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http"), ns, nil, nil)
jig.WaitForIngress(true)
By("Updating the path on ingress and wait for it to take effect")
@ -238,11 +241,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
It("should not reconcile manually modified health check for ingress", func() {
By("Creating a basic HTTP ingress and wait for it to come up.")
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http"), ns, nil, nil)
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http"), ns, nil, nil)
jig.WaitForIngress(true)
// Get cluster UID.
clusterID, err := framework.GetClusterID(f.ClientSet)
clusterID, err := gce.GetClusterID(f.ClientSet)
Expect(err).NotTo(HaveOccurred())
// Get the related nodeports.
nodePorts := jig.GetIngressNodePorts(false)
@ -250,7 +253,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// Filter health check using cluster UID as the suffix.
By("Retrieving relevant health check resources from GCE.")
gceCloud := gceController.Cloud.Provider.(*gcecloud.GCECloud)
gceCloud, err := gce.GetGCECloud()
Expect(err).NotTo(HaveOccurred())
hcs, err := gceCloud.ListHealthChecks()
Expect(err).NotTo(HaveOccurred())
var hcToChange *compute.HealthCheck
@ -313,8 +317,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
It("should support multiple TLS certs", func() {
By("Creating an ingress with no certs.")
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "multiple-certs"), ns, map[string]string{
framework.IngressStaticIPKey: ns,
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "multiple-certs"), ns, map[string]string{
ingress.IngressStaticIPKey: ns,
}, map[string]string{})
By("Adding multiple certs to the ingress.")
@ -349,8 +353,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
It("multicluster ingress should get instance group annotation", func() {
name := "echomap"
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http"), ns, map[string]string{
framework.IngressClassKey: framework.MulticlusterIngressClassValue,
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http"), ns, map[string]string{
ingress.IngressClassKey: ingress.MulticlusterIngressClassValue,
}, map[string]string{})
By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name))
@ -358,25 +362,25 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
ing, err := f.ClientSet.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
framework.ExpectNoError(err)
annotations := ing.Annotations
if annotations == nil || annotations[framework.InstanceGroupAnnotation] == "" {
framework.Logf("Waiting for ingress to get %s annotation. Found annotations: %v", framework.InstanceGroupAnnotation, annotations)
if annotations == nil || annotations[ingress.InstanceGroupAnnotation] == "" {
framework.Logf("Waiting for ingress to get %s annotation. Found annotations: %v", ingress.InstanceGroupAnnotation, annotations)
return false, nil
}
return true, nil
})
if pollErr != nil {
framework.ExpectNoError(fmt.Errorf("Timed out waiting for ingress %s to get %s annotation", name, framework.InstanceGroupAnnotation))
framework.ExpectNoError(fmt.Errorf("Timed out waiting for ingress %s to get %s annotation", name, ingress.InstanceGroupAnnotation))
}
// Verify that the ingress does not get other annotations like url-map, target-proxy, backends, etc.
// Note: All resources except the firewall rule have an annotation.
umKey := framework.StatusPrefix + "/url-map"
fwKey := framework.StatusPrefix + "/forwarding-rule"
tpKey := framework.StatusPrefix + "/target-proxy"
fwsKey := framework.StatusPrefix + "/https-forwarding-rule"
tpsKey := framework.StatusPrefix + "/https-target-proxy"
scKey := framework.StatusPrefix + "/ssl-cert"
beKey := framework.StatusPrefix + "/backends"
umKey := ingress.StatusPrefix + "/url-map"
fwKey := ingress.StatusPrefix + "/forwarding-rule"
tpKey := ingress.StatusPrefix + "/target-proxy"
fwsKey := ingress.StatusPrefix + "/https-forwarding-rule"
tpsKey := ingress.StatusPrefix + "/https-target-proxy"
scKey := ingress.StatusPrefix + "/ssl-cert"
beKey := ingress.StatusPrefix + "/backends"
wait.Poll(2*time.Second, time.Minute, func() (bool, error) {
ing, err := f.ClientSet.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
framework.ExpectNoError(err)
@ -422,7 +426,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
httpsScheme := "request_scheme=https"
By("Create a basic HTTP2 ingress")
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http2"), ns, map[string]string{}, map[string]string{})
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http2"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
address, err := jig.WaitForIngressAddress(jig.Client, jig.Ingress.Namespace, jig.Ingress.Name, framework.LoadBalancerPollTimeout)
@ -434,7 +438,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, svc := range svcList.Items {
svc.Annotations[framework.ServiceApplicationProtocolKey] = `{"http2":"HTTPS"}`
svc.Annotations[ingress.ServiceApplicationProtocolKey] = `{"http2":"HTTPS"}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred())
}
@ -444,7 +448,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, svc := range svcList.Items {
svc.Annotations[framework.ServiceApplicationProtocolKey] = `{"http2":"HTTP2"}`
svc.Annotations[ingress.ServiceApplicationProtocolKey] = `{"http2":"HTTP2"}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred())
}
@ -456,13 +460,13 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
})
Describe("GCE [Slow] [Feature:NEG]", func() {
var gceController *framework.GCEIngressController
var gceController *gce.GCEIngressController
// Platform specific setup
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
By("Initializing gce controller")
gceController = &framework.GCEIngressController{
gceController = &gce.GCEIngressController{
Ns: ns,
Client: jig.Client,
Cloud: framework.TestContext.CloudConfig,
@ -489,8 +493,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
It("should conform to Ingress spec", func() {
jig.PollInterval = 5 * time.Second
conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{
framework.NEGAnnotation: "true",
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{
ingress.NEGAnnotation: `{"ingress": true}`,
})
for _, t := range conformanceTests {
By(t.EntryLog)
@ -506,7 +510,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
It("should be able to switch between IG and NEG modes", func() {
var err error
By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
Expect(err).NotTo(HaveOccurred())
@ -516,7 +520,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, svc := range svcList.Items {
svc.Annotations[framework.NEGAnnotation] = "false"
svc.Annotations[ingress.NEGAnnotation] = `{"ingress": false}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred())
}
@ -529,7 +533,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, svc := range svcList.Items {
svc.Annotations[framework.NEGAnnotation] = "true"
svc.Annotations[ingress.NEGAnnotation] = `{"ingress": true}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred())
}
@ -539,10 +543,10 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
jig.WaitForIngress(true)
})
It("should be able to create a ClusterIP service [Unreleased]", func() {
It("should be able to create a ClusterIP service", func() {
var err error
By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{})
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
svcPorts := jig.GetServicePorts(false)
usingNEG, err := gceController.BackendServiceUsingNEG(svcPorts)
@ -565,7 +569,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(ns).UpdateScale(name, scale)
Expect(err).NotTo(HaveOccurred())
}
wait.Poll(10*time.Second, framework.NEGUpdateTimeout, func() (bool, error) {
wait.Poll(10*time.Second, ingress.NEGUpdateTimeout, func() (bool, error) {
res, err := jig.GetDistinctResponseFromIngress()
if err != nil {
return false, nil
@ -575,7 +579,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
}
By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
Expect(err).NotTo(HaveOccurred())
@ -600,7 +604,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
name := "hostname"
replicas := 8
By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
Expect(err).NotTo(HaveOccurred())
@ -647,19 +651,157 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
}
})
})
It("should sync endpoints for both Ingress-referenced NEG and standalone NEG", func() {
name := "hostname"
expectedKeys := []int32{80, 443}
scaleAndValidateExposedNEG := func(num int) {
scale, err := f.ClientSet.ExtensionsV1beta1().Deployments(ns).GetScale(name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
if scale.Spec.Replicas != int32(num) {
scale.Spec.Replicas = int32(num)
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(ns).UpdateScale(name, scale)
Expect(err).NotTo(HaveOccurred())
}
wait.Poll(10*time.Second, ingress.NEGUpdateTimeout, func() (bool, error) {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
var status ingress.NegStatus
v, ok := svc.Annotations[ingress.NEGStatusAnnotation]
if !ok {
// Wait for NEG sync loop to find NEGs
framework.Logf("Waiting for %v, got: %+v", ingress.NEGStatusAnnotation, svc.Annotations)
return false, nil
}
err = json.Unmarshal([]byte(v), &status)
if err != nil {
framework.Logf("Error in parsing Expose NEG annotation: %v", err)
return false, nil
}
framework.Logf("Got %v: %v", ingress.NEGStatusAnnotation, v)
// Expect 2 NEGs to be created based on the test setup (neg-exposed)
if len(status.NetworkEndpointGroups) != 2 {
framework.Logf("Expected 2 NEGs, got %d", len(status.NetworkEndpointGroups))
return false, nil
}
for _, port := range expectedKeys {
if _, ok := status.NetworkEndpointGroups[port]; !ok {
framework.Logf("Expected ServicePort key %v, but does not exist", port)
}
}
if len(status.NetworkEndpointGroups) != len(expectedKeys) {
framework.Logf("Expected length of %+v to equal length of %+v, but does not", status.NetworkEndpointGroups, expectedKeys)
}
gceCloud, err := gce.GetGCECloud()
Expect(err).NotTo(HaveOccurred())
for _, neg := range status.NetworkEndpointGroups {
networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false)
Expect(err).NotTo(HaveOccurred())
if len(networkEndpoints) != num {
framework.Logf("Expect number of endpoints to be %d, but got %d", num, len(networkEndpoints))
return false, nil
}
}
return true, nil
})
}
By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
Expect(err).NotTo(HaveOccurred())
Expect(usingNEG).To(BeTrue())
// initial replicas number is 1
scaleAndValidateExposedNEG(1)
By("Scale up number of backends to 5")
scaleAndValidateExposedNEG(5)
By("Scale down number of backends to 3")
scaleAndValidateExposedNEG(3)
By("Scale up number of backends to 6")
scaleAndValidateExposedNEG(6)
By("Scale down number of backends to 2")
scaleAndValidateExposedNEG(2)
})
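The scaling helper above drives its checks off the NEG status annotation that the NEG controller writes onto the Service. A minimal standalone sketch of decoding it follows; the struct fields and JSON keys are assumptions chosen to mirror the ingress.NegStatus usage above, not an authoritative definition.

// Sketch only: a stand-in for ingress.NegStatus with just the fields the test reads.
package main

import (
	"encoding/json"
	"fmt"
)

type negStatus struct {
	// Assumed shape: maps service port to the NEG name created for it.
	NetworkEndpointGroups map[int32]string `json:"network_endpoint_groups,omitempty"`
	Zones                 []string         `json:"zones,omitempty"`
}

func main() {
	// Illustrative annotation value of the shape the test expects.
	v := `{"network_endpoint_groups":{"80":"k8s1-example-80","443":"k8s1-example-443"},"zones":["us-central1-a"]}`
	var s negStatus
	if err := json.Unmarshal([]byte(v), &s); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(len(s.NetworkEndpointGroups), "NEG(s) found:", s.NetworkEndpointGroups)
}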
It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise", func() {
By("Create a basic HTTP ingress using standalone NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
name := "hostname"
detectNegAnnotation(f, jig, gceController, ns, name, 2)
// Add Ingress annotation - NEGs should stay the same.
By("Adding NEG Ingress annotation")
svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred())
}
detectNegAnnotation(f, jig, gceController, ns, name, 2)
// Modify exposed NEG annotation, but keep ingress annotation
By("Modifying exposed NEG annotation, but keep Ingress annotation")
svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred())
}
detectNegAnnotation(f, jig, gceController, ns, name, 2)
// Remove Ingress annotation. Expect 1 NEG
By("Disabling Ingress annotation, but keeping one standalone NEG")
svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred())
}
detectNegAnnotation(f, jig, gceController, ns, name, 1)
// Remove NEG annotation entirely. Expect 0 NEGs.
By("Removing NEG annotation")
svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, svc := range svcList.Items {
delete(svc.Annotations, ingress.NEGAnnotation)
// Service cannot be ClusterIP if it's using Instance Groups.
svc.Spec.Type = v1.ServiceTypeNodePort
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred())
}
detectNegAnnotation(f, jig, gceController, ns, name, 0)
})
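The NEG annotation values set throughout this test follow a small JSON schema: `ingress` toggles NEGs for Ingress-referenced ports, while `exposed_ports` requests standalone NEGs per port. Below is a hedged sketch that just reproduces the literals used above programmatically; the struct is an illustration, not the controller's actual type.

// Sketch only: reproduces the annotation literals used in the test above.
package main

import (
	"encoding/json"
	"fmt"
)

type negConfig struct {
	Ingress      bool                `json:"ingress"`
	ExposedPorts map[string]struct{} `json:"exposed_ports,omitempty"`
}

func negAnnotationValue(ingressNEG bool, exposedPorts ...string) string {
	cfg := negConfig{Ingress: ingressNEG}
	if len(exposedPorts) > 0 {
		cfg.ExposedPorts = map[string]struct{}{}
		for _, p := range exposedPorts {
			cfg.ExposedPorts[p] = struct{}{}
		}
	}
	b, _ := json.Marshal(cfg)
	return string(b)
}

func main() {
	fmt.Println(negAnnotationValue(true))              // {"ingress":true}
	fmt.Println(negAnnotationValue(true, "80", "443")) // {"ingress":true,"exposed_ports":{"443":{},"80":{}}}
	fmt.Println(negAnnotationValue(false, "443"))      // {"ingress":false,"exposed_ports":{"443":{}}}
}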
})
Describe("GCE [Slow] [Feature:kubemci]", func() {
var gceController *framework.GCEIngressController
var gceController *gce.GCEIngressController
var ipName, ipAddress string
// Platform specific setup
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
jig.Class = framework.MulticlusterIngressClassValue
jig.Class = ingress.MulticlusterIngressClassValue
jig.PollInterval = 5 * time.Second
By("Initializing gce controller")
gceController = &framework.GCEIngressController{
gceController = &gce.GCEIngressController{
Ns: ns,
Client: jig.Client,
Cloud: framework.TestContext.CloudConfig,
@ -692,8 +834,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
})
It("should conform to Ingress spec", func() {
conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{
framework.IngressStaticIPKey: ipName,
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{
ingress.IngressStaticIPKey: ipName,
})
for _, t := range conformanceTests {
By(t.EntryLog)
@ -717,9 +859,9 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
It("should remove clusters as expected", func() {
ingAnnotations := map[string]string{
framework.IngressStaticIPKey: ipName,
ingress.IngressStaticIPKey: ipName,
}
ingFilePath := filepath.Join(framework.IngressManifestPath, "http")
ingFilePath := filepath.Join(ingress.IngressManifestPath, "http")
jig.CreateIngress(ingFilePath, ns, ingAnnotations, map[string]string{})
jig.WaitForIngress(false /*waitForNodePort*/)
name := jig.Ingress.Name
@ -747,7 +889,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
It("single and multi-cluster ingresses should be able to exist together", func() {
By("Creating a single cluster ingress first")
jig.Class = ""
singleIngFilePath := filepath.Join(framework.IngressManifestPath, "static-ip-2")
singleIngFilePath := filepath.Join(ingress.IngressManifestPath, "static-ip-2")
jig.CreateIngress(singleIngFilePath, ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(false /*waitForNodePort*/)
// jig.Ingress will be overwritten when we create MCI, so keep a reference.
@ -755,11 +897,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// Create the multi-cluster ingress next.
By("Creating a multi-cluster ingress next")
jig.Class = framework.MulticlusterIngressClassValue
jig.Class = ingress.MulticlusterIngressClassValue
ingAnnotations := map[string]string{
framework.IngressStaticIPKey: ipName,
ingress.IngressStaticIPKey: ipName,
}
multiIngFilePath := filepath.Join(framework.IngressManifestPath, "http")
multiIngFilePath := filepath.Join(ingress.IngressManifestPath, "http")
jig.CreateIngress(multiIngFilePath, ns, ingAnnotations, map[string]string{})
jig.WaitForIngress(false /*waitForNodePort*/)
mciIngress := jig.Ingress
@ -769,7 +911,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
jig.Class = ""
jig.TryDeleteIngress()
jig.Ingress = mciIngress
jig.Class = framework.MulticlusterIngressClassValue
jig.Class = ingress.MulticlusterIngressClassValue
jig.WaitForIngress(false /*waitForNodePort*/)
By("Cleanup: Deleting the multi-cluster ingress")
@ -779,19 +921,19 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// Time: borderline 5m, slow by design
Describe("[Slow] Nginx", func() {
var nginxController *framework.NginxIngressController
var nginxController *ingress.NginxIngressController
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
By("Initializing nginx controller")
jig.Class = "nginx"
nginxController = &framework.NginxIngressController{Ns: ns, Client: jig.Client}
nginxController = &ingress.NginxIngressController{Ns: ns, Client: jig.Client}
// TODO: This test may fail on other platforms. We can simply skip it
// but we want to allow easy testing where a user might've hand
// configured firewalls.
if framework.ProviderIs("gce", "gke") {
framework.ExpectNoError(framework.GcloudComputeResourceCreate("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID, "--allow", "tcp:80,tcp:443", "--network", framework.TestContext.CloudConfig.Network))
framework.ExpectNoError(gce.GcloudComputeResourceCreate("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID, "--allow", "tcp:80,tcp:443", "--network", framework.TestContext.CloudConfig.Network))
} else {
framework.Logf("WARNING: Not running on GCE/GKE, cannot create firewall rules for :80, :443. Assuming traffic can reach the external ips of all nodes in cluster on those ports.")
}
@ -801,7 +943,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
AfterEach(func() {
if framework.ProviderIs("gce", "gke") {
framework.ExpectNoError(framework.GcloudComputeResourceDelete("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID))
framework.ExpectNoError(gce.GcloudComputeResourceDelete("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID))
}
if CurrentGinkgoTestDescription().Failed {
framework.DescribeIng(ns)
@ -818,7 +960,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// Poll more frequently to reduce e2e completion time.
// This test runs in presubmit.
jig.PollInterval = 5 * time.Second
conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{})
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{})
for _, t := range conformanceTests {
By(t.EntryLog)
t.Execute()
@ -840,13 +982,13 @@ func verifyKubemciStatusHas(name, expectedSubStr string) {
}
}
func executePresharedCertTest(f *framework.Framework, jig *framework.IngressTestJig, staticIPName string) {
func executePresharedCertTest(f *framework.Framework, jig *ingress.IngressTestJig, staticIPName string) {
preSharedCertName := "test-pre-shared-cert"
By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName))
testHostname := "test.ingress.com"
cert, key, err := framework.GenerateRSACerts(testHostname, true)
cert, key, err := ingress.GenerateRSACerts(testHostname, true)
Expect(err).NotTo(HaveOccurred())
gceCloud, err := framework.GetGCECloud()
gceCloud, err := gce.GetGCECloud()
Expect(err).NotTo(HaveOccurred())
defer func() {
// We would not be able to delete the cert until ingress controller
@ -876,36 +1018,36 @@ func executePresharedCertTest(f *framework.Framework, jig *framework.IngressTest
By("Creating an ingress referencing the pre-shared certificate")
// Create an ingress referencing this cert using pre-shared-cert annotation.
ingAnnotations := map[string]string{
framework.IngressPreSharedCertKey: preSharedCertName,
ingress.IngressPreSharedCertKey: preSharedCertName,
// Disallow HTTP to save resources. This is irrelevant to the
// pre-shared cert test.
framework.IngressAllowHTTPKey: "false",
ingress.IngressAllowHTTPKey: "false",
}
if staticIPName != "" {
ingAnnotations[framework.IngressStaticIPKey] = staticIPName
ingAnnotations[ingress.IngressStaticIPKey] = staticIPName
}
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "pre-shared-cert"), f.Namespace.Name, ingAnnotations, map[string]string{})
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "pre-shared-cert"), f.Namespace.Name, ingAnnotations, map[string]string{})
By("Test that ingress works with the pre-shared certificate")
err = jig.WaitForIngressWithCert(true, []string{testHostname}, cert)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
}
func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *framework.IngressTestJig, ipName, ip string) {
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "static-ip"), f.Namespace.Name, map[string]string{
framework.IngressStaticIPKey: ipName,
framework.IngressAllowHTTPKey: "false",
func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.IngressTestJig, ipName, ip string) {
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "static-ip"), f.Namespace.Name, map[string]string{
ingress.IngressStaticIPKey: ipName,
ingress.IngressAllowHTTPKey: "false",
}, map[string]string{})
By("waiting for Ingress to come up with ip: " + ip)
httpClient := framework.BuildInsecureClient(framework.IngressReqTimeout)
httpClient := ingress.BuildInsecureClient(ingress.IngressReqTimeout)
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false))
By("should reject HTTP traffic")
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true))
}
func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *framework.IngressTestJig, staticIPName string) {
func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.IngressTestJig, staticIPName string) {
By("Creating a set of ingress, service and deployment that have backside re-encryption configured")
deployCreated, svcCreated, ingCreated, err := jig.SetUpBacksideHTTPSIngress(f.ClientSet, f.Namespace.Name, staticIPName)
defer func() {
@ -921,7 +1063,7 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *framework.Ing
Expect(err).NotTo(HaveOccurred(), "Failed to wait for ingress IP")
By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP))
timeoutClient := &http.Client{Timeout: framework.IngressReqTimeout}
timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout}
err = wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) {
resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "")
if err != nil {
@ -937,8 +1079,8 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *framework.Ing
Expect(err).NotTo(HaveOccurred(), "Failed to verify backside re-encryption ingress")
}
func detectHttpVersionAndSchemeTest(f *framework.Framework, jig *framework.IngressTestJig, address, version, scheme string) {
timeoutClient := &http.Client{Timeout: framework.IngressReqTimeout}
func detectHttpVersionAndSchemeTest(f *framework.Framework, jig *ingress.IngressTestJig, address, version, scheme string) {
timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout}
resp := ""
err := wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) {
resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", address), "")
@ -958,3 +1100,49 @@ func detectHttpVersionAndSchemeTest(f *framework.Framework, jig *framework.Ingre
})
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get %s or %s, response body: %s", version, scheme, resp))
}
func detectNegAnnotation(f *framework.Framework, jig *ingress.IngressTestJig, gceController *gce.GCEIngressController, ns, name string, negs int) {
wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
if err != nil {
return false, nil
}
// if we expect no NEGs, then we should be using IGs
if negs == 0 {
return gceController.BackendServiceUsingIG(jig.GetServicePorts(false))
}
var status ingress.NegStatus
v, ok := svc.Annotations[ingress.NEGStatusAnnotation]
if !ok {
framework.Logf("Waiting for %v, got: %+v", ingress.NEGStatusAnnotation, svc.Annotations)
return false, nil
}
err = json.Unmarshal([]byte(v), &status)
if err != nil {
framework.Logf("Error in parsing Expose NEG annotation: %v", err)
return false, nil
}
framework.Logf("Got %v: %v", ingress.NEGStatusAnnotation, v)
if len(status.NetworkEndpointGroups) != negs {
framework.Logf("Expected %d NEGs, got %d", negs, len(status.NetworkEndpointGroups))
return false, nil
}
gceCloud, err := gce.GetGCECloud()
Expect(err).NotTo(HaveOccurred())
for _, neg := range status.NetworkEndpointGroups {
networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false)
Expect(err).NotTo(HaveOccurred())
if len(networkEndpoints) != 1 {
framework.Logf("Expect NEG %s to exist, but got %d", neg, len(networkEndpoints))
return false, nil
}
}
return gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
})
}
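One detail worth noting in detectNegAnnotation as written: the error returned by wait.Poll is discarded, so a timeout by itself would not fail the test. A minimal sketch of surfacing it, reusing the same names as above (illustrative only, not a drop-in patch):

// Sketch only: same polling body as above, but the timeout is propagated.
if pollErr := wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
	// ... identical checks to detectNegAnnotation above ...
	return true, nil
}); pollErr != nil {
	framework.Failf("timed out waiting for %d NEG(s) on service %s/%s: %v", negs, ns, name, pollErr)
}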

View File

@ -77,6 +77,16 @@ var _ = SIGDescribe("Network", func() {
zero := int64(0)
// Some distributions (Ubuntu 16.04 etc.) don't support the proc file.
_, err := framework.IssueSSHCommandWithResult(
"ls /proc/net/nf_conntrack",
framework.TestContext.Provider,
clientNodeInfo.node)
if err != nil && strings.Contains(err.Error(), "No such file or directory") {
framework.Skipf("The node %s does not support /proc/net/nf_conntrack", clientNodeInfo.name)
}
framework.ExpectNoError(err)
clientPodSpec := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "e2e-net-client",

View File

@ -556,11 +556,11 @@ func createNetworkClientPod(f *framework.Framework, namespace *v1.Namespace, pod
Containers: []v1.Container{
{
Name: fmt.Sprintf("%s-container", podName),
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Args: []string{
"/bin/sh",
"-c",
fmt.Sprintf("for i in $(seq 1 5); do wget -T 8 %s.%s:%d -O - && exit 0 || sleep 1; done; exit 1",
fmt.Sprintf("for i in $(seq 1 5); do curl -s -m 8 %s.%s:%d && exit 0 || sleep 1; done; exit 1",
targetService.Name, targetService.Namespace, targetPort),
},
},

View File

@ -26,10 +26,11 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -53,7 +54,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
}
for _, lb := range serviceLBNames {
framework.Logf("cleaning gce resource for %s", lb)
framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
framework.TestContext.CloudConfig.Provider.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
}
// reset serviceLBNames
serviceLBNames = []string{}
@ -80,7 +81,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
Expect(err).NotTo(HaveOccurred())
Expect(svcTier).To(Equal(cloud.NetworkTierStandard))
// Record the LB name for test cleanup.
serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc))
serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
// Wait and verify the LB.
ingressIP := waitAndVerifyLBWithTier(jig, ns, svcName, "", createTimeout, lagTimeout)
@ -102,7 +103,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
// Test 3: create a standard-tiered LB with a user-requested IP.
By("reserving a static IP for the load balancer")
requestedAddrName := fmt.Sprintf("e2e-ext-lb-net-tier-%s", framework.RunId)
gceCloud, err := framework.GetGCECloud()
gceCloud, err := gce.GetGCECloud()
Expect(err).NotTo(HaveOccurred())
requestedIP, err := reserveAlphaRegionalAddress(gceCloud, requestedAddrName, cloud.NetworkTierStandard)
Expect(err).NotTo(HaveOccurred(), "failed to reserve a STANDARD tiered address")
@ -187,7 +188,7 @@ func getLBNetworkTierByIP(ip string) (cloud.NetworkTier, error) {
}
func getGCEForwardingRuleByIP(ip string) (*computealpha.ForwardingRule, error) {
cloud, err := framework.GetGCECloud()
cloud, err := gce.GetGCECloud()
if err != nil {
return nil, err
}
@ -221,7 +222,7 @@ func clearNetworkTier(svc *v1.Service) {
// TODO: add retries if this turns out to be flaky.
// TODO(#51665): remove this helper function once Network Tiers becomes beta.
func reserveAlphaRegionalAddress(cloud *gcecloud.GCECloud, name string, netTier cloud.NetworkTier) (string, error) {
func reserveAlphaRegionalAddress(cloud *gcecloud.Cloud, name string, netTier cloud.NetworkTier) (string, error) {
alphaAddr := &computealpha.Address{
Name: name,
NetworkTier: netTier.ToGCEValue(),

View File

@ -61,26 +61,25 @@ var _ = SIGDescribe("Proxy", func() {
prefix := "/api/" + version
/*
Testname: proxy-subresource-node-logs-port
Description: Ensure that proxy on node logs works with node proxy
subresource and explicit kubelet port.
Release : v1.9
Testname: Proxy, logs port endpoint
Description: Select any node in the cluster to invoke /proxy/nodes/<nodeip>:10250/logs endpoint. This endpoint MUST be reachable.
*/
framework.ConformanceIt("should proxy logs on node with explicit kubelet port using proxy subresource ", func() { nodeProxyTest(f, prefix+"/nodes/", ":10250/proxy/logs/") })
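Both conformance tests above go through the nodes/proxy subresource via nodeProxyTest. As a rough sketch of the request they end up issuing (assumed client-go usage, not code from this diff), fetching the kubelet logs listing with the explicit-port form looks roughly like:

// Sketch only (assumed client-go pattern): GET the kubelet logs listing through
// the API server's nodes/proxy subresource, using the explicit kubelet port.
// "cs" is a kubernetes.Interface ("k8s.io/client-go/kubernetes"), e.g. f.ClientSet.
func nodeLogsViaProxy(cs kubernetes.Interface, nodeName string) ([]byte, error) {
	return cs.CoreV1().RESTClient().Get().
		AbsPath("/api/v1/nodes/" + nodeName + ":10250/proxy/logs/").
		Do().Raw()
}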
/*
Testname: proxy-subresource-node-logs
Description: Ensure that proxy on node logs works with node proxy
subresource.
Release : v1.9
Testname: Proxy, logs endpoint
Description: Select any node in the cluster to invoke /proxy/nodes/<nodeip>//logs endpoint. This endpoint MUST be reachable.
*/
framework.ConformanceIt("should proxy logs on node using proxy subresource ", func() { nodeProxyTest(f, prefix+"/nodes/", "/proxy/logs/") })
// using the porter image to serve content, access the content
// (of multiple pods?) from multiple (endpoints/services?)
/*
Testname: proxy-service-pod
Description: Ensure that proxy through a service and a pod works with
both generic top level prefix proxy and proxy subresource.
Release : v1.9
Testname: Proxy, logs service endpoint
Description: Select any node in the cluster to invoke /logs endpoint using the /nodes/proxy subresource from the kubelet port. This endpoint MUST be reachable.
*/
framework.ConformanceIt("should proxy through a service and a pod ", func() {
start := time.Now()

View File

@ -6,12 +6,14 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/network/scale",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework/ingress:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",
],
)

View File

@ -29,6 +29,8 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/ingress"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
)
const (
@ -59,10 +61,10 @@ var (
// IngressScaleFramework defines the framework for ingress scale testing.
type IngressScaleFramework struct {
Clientset clientset.Interface
Jig *framework.IngressTestJig
GCEController *framework.GCEIngressController
Jig *ingress.IngressTestJig
GCEController *gce.GCEIngressController
CloudConfig framework.CloudConfig
Logger framework.TestLogger
Logger ingress.TestLogger
Namespace string
EnableTLS bool
@ -92,7 +94,7 @@ func NewIngressScaleFramework(cs clientset.Interface, ns string, cloudConfig fra
Namespace: ns,
Clientset: cs,
CloudConfig: cloudConfig,
Logger: &framework.E2ELogger{},
Logger: &ingress.E2ELogger{},
EnableTLS: true,
NumIngressesTest: []int{
numIngressesSmall,
@ -106,10 +108,10 @@ func NewIngressScaleFramework(cs clientset.Interface, ns string, cloudConfig fra
// PrepareScaleTest prepares framework for ingress scale testing.
func (f *IngressScaleFramework) PrepareScaleTest() error {
f.Logger.Infof("Initializing ingress test suite and gce controller...")
f.Jig = framework.NewIngressTestJig(f.Clientset)
f.Jig = ingress.NewIngressTestJig(f.Clientset)
f.Jig.Logger = f.Logger
f.Jig.PollInterval = scaleTestPollInterval
f.GCEController = &framework.GCEIngressController{
f.GCEController = &gce.GCEIngressController{
Client: f.Clientset,
Cloud: f.CloudConfig,
}

View File

@ -7,13 +7,15 @@ go_library(
visibility = ["//visibility:private"],
deps = [
"//pkg/cloudprovider/providers/gce:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/ingress:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",
"//test/e2e/network/scale:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

View File

@ -24,7 +24,7 @@ import (
"sort"
"strconv"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -33,6 +33,8 @@ import (
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/ingress"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
"k8s.io/kubernetes/test/e2e/network/scale"
)
@ -89,19 +91,19 @@ func main() {
registerFlags()
flag.Parse()
if err := verifyFlags(); err != nil {
glog.Errorf("Failed to verify flags: %v", err)
klog.Errorf("Failed to verify flags: %v", err)
os.Exit(1)
}
// Initializing a k8s client.
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
glog.Errorf("Failed to build kubeconfig: %v", err)
klog.Errorf("Failed to build kubeconfig: %v", err)
os.Exit(1)
}
cs, err := clientset.NewForConfig(config)
if err != nil {
glog.Errorf("Failed to create kubeclient: %v", err)
klog.Errorf("Failed to create kubeclient: %v", err)
os.Exit(1)
}
@ -114,15 +116,15 @@ func main() {
AlphaFeatureGate: gceAlphaFeatureGate,
})
if err != nil {
glog.Errorf("Error building GCE provider: %v", err)
klog.Errorf("Error building GCE provider: %v", err)
os.Exit(1)
}
cloudConfig.Provider = gceCloud
cloudConfig.Provider = gce.NewProvider(gceCloud)
testSuccessFlag := true
defer func() {
if !testSuccessFlag {
glog.Errorf("Ingress scale test failed.")
klog.Errorf("Ingress scale test failed.")
os.Exit(1)
}
}()
@ -132,17 +134,17 @@ func main() {
Name: testNamespace,
},
}
glog.Infof("Creating namespace %s...", ns.Name)
klog.Infof("Creating namespace %s...", ns.Name)
if _, err := cs.CoreV1().Namespaces().Create(ns); err != nil {
glog.Errorf("Failed to create namespace %s: %v", ns.Name, err)
klog.Errorf("Failed to create namespace %s: %v", ns.Name, err)
testSuccessFlag = false
return
}
if cleanup {
defer func() {
glog.Infof("Deleting namespace %s...", ns.Name)
klog.Infof("Deleting namespace %s...", ns.Name)
if err := cs.CoreV1().Namespaces().Delete(ns.Name, nil); err != nil {
glog.Errorf("Failed to delete namespace %s: %v", ns.Name, err)
klog.Errorf("Failed to delete namespace %s: %v", ns.Name, err)
testSuccessFlag = false
}
}()
@ -150,7 +152,7 @@ func main() {
// Setting up a localized scale test framework.
f := scale.NewIngressScaleFramework(cs, ns.Name, cloudConfig)
f.Logger = &framework.GLogger{}
f.Logger = &ingress.GLogger{}
// Customizing scale test.
f.EnableTLS = enableTLS
f.OutputFile = outputFile
@ -162,20 +164,20 @@ func main() {
if cleanup {
defer func() {
if errs := f.CleanupScaleTest(); len(errs) != 0 {
glog.Errorf("Failed to cleanup scale test: %v", errs)
klog.Errorf("Failed to cleanup scale test: %v", errs)
testSuccessFlag = false
}
}()
}
err = f.PrepareScaleTest()
if err != nil {
glog.Errorf("Failed to prepare scale test: %v", err)
klog.Errorf("Failed to prepare scale test: %v", err)
testSuccessFlag = false
return
}
if errs := f.RunScaleTest(); len(errs) != 0 {
glog.Errorf("Failed while running scale test: %v", errs)
klog.Errorf("Failed while running scale test: %v", errs)
testSuccessFlag = false
}
}
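Since this command is part of the glog-to-klog migration shown above, note that klog is an API-compatible fork of glog, so the Errorf/Infof call sites stay unchanged; only the import and, in programs that want the logging flags registered, a call to klog.InitFlags differ. A minimal standalone sketch of typical wiring (assumed usage, not taken from this diff):

// Sketch only: minimal klog wiring in a standalone command.
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // registers -v, -logtostderr, etc. on the default flag set
	flag.Parse()
	defer klog.Flush()

	klog.Infof("klog keeps glog's call signatures: %d", 42)
}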

View File

@ -33,10 +33,12 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/cloudprovider"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/controller/endpoint"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
@ -57,7 +59,7 @@ var (
Ports: []v1.ServicePort{{
Port: int32(defaultServeHostnameServicePort),
TargetPort: intstr.FromInt(9376),
Protocol: "TCP",
Protocol: v1.ProtocolTCP,
}},
Selector: map[string]string{
"name": defaultServeHostnameServiceName,
@ -90,8 +92,8 @@ var _ = SIGDescribe("Services", func() {
framework.DescribeSvc(f.Namespace.Name)
}
for _, lb := range serviceLBNames {
framework.Logf("cleaning gce resource for %s", lb)
framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
framework.Logf("cleaning load balancer resource for %s", lb)
framework.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
}
// reset serviceLBNames
serviceLBNames = []string{}
@ -100,8 +102,9 @@ var _ = SIGDescribe("Services", func() {
// TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here.
/*
Testname: service-kubernetes-exists
Description: Make sure kubernetes service does exist.
Release : v1.9
Testname: Kubernetes Service
Description: By default when a kubernetes cluster is running there MUST be a kubernetes service running in the cluster.
*/
framework.ConformanceIt("should provide secure master service ", func() {
_, err := cs.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
@ -109,9 +112,9 @@ var _ = SIGDescribe("Services", func() {
})
/*
Testname: service-valid-endpoints
Description: Ensure a service with no pod, one pod or two pods has
valid/accessible endpoints (same port number for service and pods).
Release : v1.9
Testname: Service, endpoints
Description: Create a service with an endpoint but without any Pods; the service MUST run and show empty endpoints. Add a Pod to the service and the service MUST show all the endpoints for the ports exposed by the Pod. Add another Pod; the list of all ports exposed by both Pods MUST then be valid and have corresponding service endpoints. Once the second Pod is deleted, the set of endpoints MUST be validated to show only the exposed ports of the first Pod. Once both Pods are deleted, the endpoints of the service MUST be empty.
*/
framework.ConformanceIt("should serve a basic endpoint from pods ", func() {
serviceName := "endpoint-test2"
@ -166,9 +169,9 @@ var _ = SIGDescribe("Services", func() {
})
/*
Testname: service-valid-endpoints-multiple-ports
Description: Ensure a service with no pod, one pod or two pods has
valid/accessible endpoints (different port number for pods).
Release : v1.9
Testname: Service, endpoints with multiple ports
Description: Create a service with two ports, with no Pods added to the service yet. The service MUST run and show an empty set of endpoints. Add a Pod to the first port; the service MUST list one endpoint for the Pod on that port. Add another Pod to the second port; the service MUST list both endpoints. Delete the first Pod and the service MUST list only the endpoint of the second Pod. Delete the second Pod and the service MUST now have an empty set of endpoints.
*/
framework.ConformanceIt("should serve multiport endpoints from pods ", func() {
// repacking functionality is intentionally not tested here - it's better to test it in an integration test.
@ -588,7 +591,7 @@ var _ = SIGDescribe("Services", func() {
if framework.ProviderIs("gce", "gke") {
By("creating a static load balancer IP")
staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunId)
gceCloud, err := framework.GetGCECloud()
gceCloud, err := gce.GetGCECloud()
Expect(err).NotTo(HaveOccurred())
err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region())
@ -620,9 +623,9 @@ var _ = SIGDescribe("Services", func() {
s.Spec.Type = v1.ServiceTypeLoadBalancer
})
}
serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(tcpService))
serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(tcpService))
if loadBalancerSupportsUDP {
serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(udpService))
serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(udpService))
}
By("waiting for the TCP service to have a load balancer")
@ -645,7 +648,7 @@ var _ = SIGDescribe("Services", func() {
// coming from, so this is first-aid rather than surgery).
By("demoting the static IP to ephemeral")
if staticIPName != "" {
gceCloud, err := framework.GetGCECloud()
gceCloud, err := gce.GetGCECloud()
Expect(err).NotTo(HaveOccurred())
// Deleting it after it is attached "demotes" it to an
// ephemeral IP, which can be auto-released.
@ -896,7 +899,7 @@ var _ = SIGDescribe("Services", func() {
s.Spec.Type = v1.ServiceTypeClusterIP
s.Spec.ExternalName = ""
s.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: "TCP"},
{Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
}
})
jig.SanityCheckService(clusterIPService, v1.ServiceTypeClusterIP)
@ -920,7 +923,7 @@ var _ = SIGDescribe("Services", func() {
s.Spec.Type = v1.ServiceTypeNodePort
s.Spec.ExternalName = ""
s.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: "TCP"},
{Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
}
})
jig.SanityCheckService(nodePortService, v1.ServiceTypeNodePort)
@ -1110,7 +1113,7 @@ var _ = SIGDescribe("Services", func() {
}
outOfRangeNodePort := 0
rand.Seed(time.Now().UTC().UnixNano())
rand.Seed(time.Now().UnixNano())
for {
outOfRangeNodePort = 1 + rand.Intn(65535)
if !framework.ServiceNodePortRange.Contains(outOfRangeNodePort) {
@ -1271,7 +1274,7 @@ var _ = SIGDescribe("Services", func() {
By("Verifying pods for RC " + t.Name)
framework.ExpectNoError(framework.VerifyPods(t.Client, t.Namespace, t.Name, false, 1))
svcName := fmt.Sprintf("%v.%v.svc.cluster.local", serviceName, f.Namespace.Name)
svcName := fmt.Sprintf("%v.%v.svc.%v", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
By("Waiting for endpoints of Service with DNS name " + svcName)
execPodName := framework.CreateExecPodOrFail(f.ClientSet, f.Namespace.Name, "execpod-", nil)
@ -1544,6 +1547,79 @@ var _ = SIGDescribe("Services", func() {
jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, createTimeout)
})
// This test creates a load balancer and makes sure its health check interval
// equals gceHcCheckIntervalSeconds. Then the interval is manipulated
// to be something else, and the test verifies that it gets reconciled back.
It("should reconcile LB health check interval [Slow][Serial]", func() {
const gceHcCheckIntervalSeconds = int64(8)
// This test is for clusters on GCE.
// (It restarts kube-controller-manager, which we don't support on GKE)
framework.SkipUnlessProviderIs("gce")
clusterID, err := gce.GetClusterID(cs)
if err != nil {
framework.Failf("framework.GetClusterID(cs) = _, %v; want nil", err)
}
gceCloud, err := gce.GetGCECloud()
if err != nil {
framework.Failf("framework.GetGCECloud() = _, %v; want nil", err)
}
namespace := f.Namespace.Name
serviceName := "lb-hc-int"
jig := framework.NewServiceTestJig(cs, serviceName)
By("create load balancer service")
// Create a LoadBalancer-type TCP service
svc := jig.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer
})
// Clean up loadbalancer service
defer func() {
jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeNodePort
})
Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
}()
svc = jig.WaitForLoadBalancerOrFail(namespace, serviceName, framework.LoadBalancerCreateTimeoutDefault)
hcName := gcecloud.MakeNodesHealthCheckName(clusterID)
hc, err := gceCloud.GetHTTPHealthCheck(hcName)
if err != nil {
framework.Failf("gceCloud.GetHttpHealthCheck(%q) = _, %v; want nil", hcName, err)
}
Expect(hc.CheckIntervalSec).To(Equal(gceHcCheckIntervalSeconds))
By("modify the health check interval")
hc.CheckIntervalSec = gceHcCheckIntervalSeconds - 1
if err = gceCloud.UpdateHTTPHealthCheck(hc); err != nil {
framework.Failf("gcecloud.UpdateHttpHealthCheck(%#v) = %v; want nil", hc, err)
}
By("restart kube-controller-manager")
if err := framework.RestartControllerManager(); err != nil {
framework.Failf("framework.RestartControllerManager() = %v; want nil", err)
}
if err := framework.WaitForControllerManagerUp(); err != nil {
framework.Failf("framework.WaitForControllerManagerUp() = %v; want nil", err)
}
By("health check should be reconciled")
pollInterval := framework.Poll * 10
if pollErr := wait.PollImmediate(pollInterval, framework.LoadBalancerCreateTimeoutDefault, func() (bool, error) {
hc, err := gceCloud.GetHTTPHealthCheck(hcName)
if err != nil {
framework.Logf("Failed to get HttpHealthCheck(%q): %v", hcName, err)
return false, err
}
framework.Logf("hc.CheckIntervalSec = %v", hc.CheckIntervalSec)
return hc.CheckIntervalSec == gceHcCheckIntervalSeconds, nil
}); pollErr != nil {
framework.Failf("Health check %q does not reconcile its check interval to %d.", hcName, gceHcCheckIntervalSeconds)
}
})
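The reconcile check above leans on wait.PollImmediate, which evaluates the condition once right away and then once per interval until the condition returns true, returns an error, or the timeout expires. A minimal standalone sketch of that idiom follows; the currentInterval stub and the 3-second "reconcile" delay are assumptions for illustration only, not part of the e2e framework.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	const want = int64(8)
	start := time.Now()
	// Stand-in for gceCloud.GetHTTPHealthCheck: pretend the controller
	// reconciles the interval back to `want` roughly 3 seconds from now.
	currentInterval := func() int64 {
		if time.Since(start) > 3*time.Second {
			return want
		}
		return want - 1
	}
	// Check immediately, then every second, for up to 30 seconds.
	err := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
		got := currentInterval()
		fmt.Printf("check interval = %d\n", got)
		return got == want, nil
	})
	if err != nil {
		fmt.Println("interval was not reconciled:", err)
	}
}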
It("should have session affinity work for service with type clusterIP", func() {
svc := getServeHostnameService("service")
svc.Spec.Type = v1.ServiceTypeClusterIP
@ -1570,6 +1646,9 @@ var _ = SIGDescribe("Services", func() {
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
It("should have session affinity work for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters]", func() {
// L4 load balancer affinity `ClientIP` is not supported on AWS ELB.
framework.SkipIfProviderIs("aws")
svc := getServeHostnameService("service")
svc.Spec.Type = v1.ServiceTypeLoadBalancer
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
@ -1578,6 +1657,9 @@ var _ = SIGDescribe("Services", func() {
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
It("should be able to switch session affinity for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters]", func() {
// L4 load balancer affinity `ClientIP` is not supported on AWS ELB.
framework.SkipIfProviderIs("aws")
svc := getServeHostnameService("service")
svc.Spec.Type = v1.ServiceTypeLoadBalancer
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
@ -1586,6 +1668,9 @@ var _ = SIGDescribe("Services", func() {
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
It("should have session affinity work for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", func() {
// L4 load balancer affinity `ClientIP` is not supported on AWS ELB.
framework.SkipIfProviderIs("aws")
svc := getServeHostnameService("service")
svc.Spec.Type = v1.ServiceTypeLoadBalancer
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
@ -1594,6 +1679,9 @@ var _ = SIGDescribe("Services", func() {
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
It("should be able to switch session affinity for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", func() {
// L4 load balancer affinity `ClientIP` is not supported on AWS ELB.
framework.SkipIfProviderIs("aws")
svc := getServeHostnameService("service")
svc.Spec.Type = v1.ServiceTypeLoadBalancer
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
@ -1624,8 +1712,8 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
framework.DescribeSvc(f.Namespace.Name)
}
for _, lb := range serviceLBNames {
framework.Logf("cleaning gce resource for %s", lb)
framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
framework.Logf("cleaning load balancer resource for %s", lb)
framework.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
}
// reset serviceLBNames
serviceLBNames = []string{}
@ -1637,7 +1725,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
jig := framework.NewServiceTestJig(cs, serviceName)
svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil)
serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc))
serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
if healthCheckNodePort == 0 {
framework.Failf("Service HealthCheck NodePort was not allocated")
@ -1709,7 +1797,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
}
})
serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc))
serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
defer func() {
jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
@ -1764,7 +1852,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
nodes := jig.GetNodes(framework.MaxNodesForEndpointsTests)
svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil)
serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc))
serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
defer func() {
jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
@ -1817,7 +1905,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
}
svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil)
serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc))
serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
defer func() {
jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
@ -2024,9 +2112,9 @@ func execAffinityTestForLBService(f *framework.Framework, cs clientset.Interface
jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
defer func() {
framework.StopServeHostnameService(cs, ns, serviceName)
lb := cloudprovider.GetLoadBalancerName(svc)
framework.Logf("cleaning gce resource for %s", lb)
framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
lb := cloudprovider.DefaultLoadBalancerName(svc)
framework.Logf("cleaning load balancer resource for %s", lb)
framework.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
}()
ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
port := int(svc.Spec.Ports[0].Port)

View File

@ -47,10 +47,9 @@ var _ = SIGDescribe("Service endpoints latency", func() {
f := framework.NewDefaultFramework("svc-latency")
/*
Testname: service-endpoint-latency
Description: Ensure service endpoint's latency is not high
(e.g. p50 < 20 seconds and p99 < 50 seconds). If any call to the
service endpoint fails, the test will also fail.
Release : v1.9
Testname: Service endpoint latency, thresholds
Description: Run 100 iterations of creating a service with a Pod running the pause image, measuring the time from service creation until an endpoint with the service name becomes available. The 100 captured durations are sorted to compute the 50th, 90th and 99th percentiles. The single server latency MUST not exceed liberally set thresholds of 20s for the 50th percentile and 50s for the 90th percentile.
*/
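As a rough illustration of the percentile computation described in the conformance text above, here is a hedged, self-contained sketch; the percentile helper, its rounding rule, and the sample values are assumptions for this example, not the test's actual code.
package main

import (
	"fmt"
	"sort"
	"time"
)

// percentile returns the q-th percentile (1 <= q <= 100) of the latencies,
// sorting the slice in place. Rounding up to the nearest rank is an assumption.
func percentile(latencies []time.Duration, q int) time.Duration {
	if len(latencies) == 0 {
		return 0
	}
	sort.Slice(latencies, func(i, j int) bool { return latencies[i] < latencies[j] })
	idx := (len(latencies)*q + 99) / 100 // ceil(N*q/100), 1-based rank
	return latencies[idx-1]
}

func main() {
	// In the real test there would be 100 measured endpoint latencies.
	latencies := []time.Duration{
		90 * time.Millisecond, 120 * time.Millisecond, 450 * time.Millisecond,
		2 * time.Second, 30 * time.Second,
	}
	p50, p90 := percentile(latencies, 50), percentile(latencies, 90)
	fmt.Printf("p50=%v p90=%v\n", p50, p90)
	fmt.Println("within thresholds:", p50 < 20*time.Second && p90 < 50*time.Second)
}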
framework.ConformanceIt("should not be very high ", func() {
const (

View File

@ -1,247 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"fmt"
"net/http"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/manifest"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// getLoadBalancerControllers returns a list of LBCTesters.
func getLoadBalancerControllers(client clientset.Interface) []LBCTester {
return []LBCTester{
&haproxyControllerTester{
name: "haproxy",
cfg: "test/e2e/testing-manifests/serviceloadbalancer/haproxyrc.yaml",
client: client,
},
}
}
// getIngManagers returns a list of ingManagers.
func getIngManagers(client clientset.Interface) []*ingManager {
return []*ingManager{
{
name: "netexec",
rcCfgPaths: []string{"test/e2e/testing-manifests/serviceloadbalancer/netexecrc.yaml"},
svcCfgPaths: []string{"test/e2e/testing-manifests/serviceloadbalancer/netexecsvc.yaml"},
svcNames: []string{},
client: client,
},
}
}
// LBCTester is an interface used to test loadbalancer controllers.
type LBCTester interface {
// start starts the loadbalancer controller in the given namespace
start(namespace string) error
// lookup returns the address (ip/hostname) associated with ingressKey
lookup(ingressKey string) string
// stop stops the loadbalancer controller
stop() error
// name returns the name of the loadbalancer
getName() string
}
// haproxyControllerTester implements LBCTester for bare metal haproxy LBs.
type haproxyControllerTester struct {
client clientset.Interface
cfg string
rcName string
rcNamespace string
name string
address []string
}
func (h *haproxyControllerTester) getName() string {
return h.name
}
func (h *haproxyControllerTester) start(namespace string) (err error) {
// Create a replication controller with the given configuration.
framework.Logf("Parsing rc from %v", h.cfg)
rc, err := manifest.RcFromManifest(h.cfg)
Expect(err).NotTo(HaveOccurred())
rc.Namespace = namespace
rc.Spec.Template.Labels["name"] = rc.Name
// Add the --namespace arg.
// TODO: Remove this when we have proper namespace support.
for i, c := range rc.Spec.Template.Spec.Containers {
rc.Spec.Template.Spec.Containers[i].Args = append(
c.Args, fmt.Sprintf("--namespace=%v", namespace))
framework.Logf("Container args %+v", rc.Spec.Template.Spec.Containers[i].Args)
}
rc, err = h.client.CoreV1().ReplicationControllers(rc.Namespace).Create(rc)
if err != nil {
return
}
if err = framework.WaitForControlledPodsRunning(h.client, namespace, rc.Name, api.Kind("ReplicationController")); err != nil {
return
}
h.rcName = rc.Name
h.rcNamespace = rc.Namespace
// Find the pods of the rc we just created.
labelSelector := labels.SelectorFromSet(
labels.Set(map[string]string{"name": h.rcName}))
options := metav1.ListOptions{LabelSelector: labelSelector.String()}
pods, err := h.client.CoreV1().Pods(h.rcNamespace).List(options)
if err != nil {
return err
}
// Find the external addresses of the nodes the pods are running on.
for _, p := range pods.Items {
wait.Poll(1*time.Second, framework.ServiceRespondingTimeout, func() (bool, error) {
address, err := framework.GetHostExternalAddress(h.client, &p)
if err != nil {
framework.Logf("%v", err)
return false, nil
}
h.address = append(h.address, address)
return true, nil
})
}
if len(h.address) == 0 {
return fmt.Errorf("No external ips found for loadbalancer %v", h.getName())
}
return nil
}
func (h *haproxyControllerTester) stop() error {
return h.client.CoreV1().ReplicationControllers(h.rcNamespace).Delete(h.rcName, nil)
}
func (h *haproxyControllerTester) lookup(ingressKey string) string {
// Currently the address of a service is <load balancer address>/<service name>.
return fmt.Sprintf("http://%v/%v", h.address[0], ingressKey)
}
// ingManager starts an rc and the associated service.
type ingManager struct {
rcCfgPaths []string
svcCfgPaths []string
ingCfgPath string
name string
namespace string
client clientset.Interface
svcNames []string
}
func (s *ingManager) getName() string {
return s.name
}
func (s *ingManager) start(namespace string) (err error) {
// Create rcs
for _, rcPath := range s.rcCfgPaths {
framework.Logf("Parsing rc from %v", rcPath)
var rc *v1.ReplicationController
rc, err = manifest.RcFromManifest(rcPath)
Expect(err).NotTo(HaveOccurred())
rc.Namespace = namespace
rc.Spec.Template.Labels["name"] = rc.Name
rc, err = s.client.CoreV1().ReplicationControllers(rc.Namespace).Create(rc)
if err != nil {
return
}
if err = framework.WaitForControlledPodsRunning(s.client, rc.Namespace, rc.Name, api.Kind("ReplicationController")); err != nil {
return
}
}
// Create services.
// Note that it's up to the caller to make sure the service actually matches
// the pods of the rc.
for _, svcPath := range s.svcCfgPaths {
framework.Logf("Parsing service from %v", svcPath)
var svc *v1.Service
svc, err = manifest.SvcFromManifest(svcPath)
Expect(err).NotTo(HaveOccurred())
svc.Namespace = namespace
svc, err = s.client.CoreV1().Services(svc.Namespace).Create(svc)
if err != nil {
return
}
// TODO: This is short term till we have an Ingress.
s.svcNames = append(s.svcNames, svc.Name)
}
s.name = s.svcNames[0]
s.namespace = namespace
return nil
}
func (s *ingManager) test(path string) error {
url := fmt.Sprintf("%v/hostName", path)
httpClient := &http.Client{}
return wait.Poll(1*time.Second, framework.ServiceRespondingTimeout, func() (bool, error) {
body, err := framework.SimpleGET(httpClient, url, "")
if err != nil {
framework.Logf("%v\n%v\n%v", url, body, err)
return false, nil
}
return true, nil
})
}
var _ = SIGDescribe("ServiceLoadBalancer [Feature:ServiceLoadBalancer]", func() {
// These variables are initialized after the framework's BeforeEach.
var ns string
var client clientset.Interface
f := framework.NewDefaultFramework("servicelb")
BeforeEach(func() {
client = f.ClientSet
ns = f.Namespace.Name
})
It("should support simple GET on Ingress ips", func() {
for _, t := range getLoadBalancerControllers(client) {
By(fmt.Sprintf("Starting loadbalancer controller %v in namespace %v", t.getName(), ns))
Expect(t.start(ns)).NotTo(HaveOccurred())
for _, s := range getIngManagers(client) {
By(fmt.Sprintf("Starting ingress manager %v in namespace %v", s.getName(), ns))
Expect(s.start(ns)).NotTo(HaveOccurred())
for _, sName := range s.svcNames {
path := t.lookup(sName)
framework.Logf("Testing path %v", path)
Expect(s.test(path)).NotTo(HaveOccurred())
}
}
Expect(t.stop()).NotTo(HaveOccurred())
}
})
})