vendor update for CSI 0.3.0

gman
2018-07-18 16:47:22 +02:00
parent 6f484f92fc
commit 8ea659f0d5
6810 changed files with 438061 additions and 193861 deletions

View File

@@ -11,6 +11,7 @@ go_library(
"dns.go",
"dns_common.go",
"dns_configmap.go",
"dns_scale_records.go",
"doc.go",
"example_cluster_dns.go",
"firewall.go",
@@ -31,7 +32,6 @@ go_library(
],
importpath = "k8s.io/kubernetes/test/e2e/network",
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/cloudprovider:go_default_library",
@@ -71,6 +71,7 @@ go_library(
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)

View File

@@ -318,4 +318,5 @@ var _ = SIGDescribe("DNS", func() {
// TODO: Add more test cases for other DNSPolicies.
})
})

View File

@@ -31,7 +31,6 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -56,9 +55,8 @@ type dnsTestCommon struct {
func newDnsTestCommon() dnsTestCommon {
return dnsTestCommon{
f: framework.NewDefaultFramework("dns-config-map"),
ns: "kube-system",
name: "kube-dns",
f: framework.NewDefaultFramework("dns-config-map"),
ns: "kube-system",
}
}
@@ -73,6 +71,12 @@ func (t *dnsTestCommon) init() {
t.dnsPod = &pods.Items[0]
framework.Logf("Using DNS pod: %v", t.dnsPod.Name)
if strings.Contains(t.dnsPod.Name, "coredns") {
t.name = "coredns"
} else {
t.name = "kube-dns"
}
}
func (t *dnsTestCommon) checkDNSRecord(name string, predicate func([]string) bool, timeout time.Duration) {
@@ -103,13 +107,18 @@ func (t *dnsTestCommon) checkDNSRecordFrom(name string, predicate func([]string)
func (t *dnsTestCommon) runDig(dnsName, target string) []string {
cmd := []string{"/usr/bin/dig", "+short"}
switch target {
case "coredns":
cmd = append(cmd, "@"+t.dnsPod.Status.PodIP)
case "kube-dns":
cmd = append(cmd, "@"+t.dnsPod.Status.PodIP, "-p", "10053")
case "dnsmasq":
case "cluster-dns":
break
default:
panic(fmt.Errorf("invalid target: " + target))
}
if strings.HasSuffix(dnsName, "in-addr.arpa") || strings.HasSuffix(dnsName, "in-addr.arpa.") {
cmd = append(cmd, []string{"-t", "ptr"}...)
}
cmd = append(cmd, dnsName)
stdout, stderr, err := t.f.ExecWithOptions(framework.ExecOptions{
@@ -159,6 +168,24 @@ func (t *dnsTestCommon) setConfigMap(cm *v1.ConfigMap) {
}
}
func (t *dnsTestCommon) fetchDNSConfigMapData() map[string]string {
if t.name == "coredns" {
pcm, err := t.c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(t.name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
return pcm.Data
}
return nil
}
func (t *dnsTestCommon) restoreDNSConfigMap(configMapData map[string]string) {
if t.name == "coredns" {
t.setConfigMap(&v1.ConfigMap{Data: configMapData})
t.deleteCoreDNSPods()
} else {
t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil)
}
}
func (t *dnsTestCommon) deleteConfigMap() {
By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name))
t.cm = nil
@@ -166,7 +193,7 @@ func (t *dnsTestCommon) deleteConfigMap() {
Expect(err).NotTo(HaveOccurred())
}
func (t *dnsTestCommon) createUtilPod() {
func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
// Actual port # doesn't matter, just needs to exist.
const servicePort = 10101
@@ -176,8 +203,8 @@ func (t *dnsTestCommon) createUtilPod() {
},
ObjectMeta: metav1.ObjectMeta{
Namespace: t.f.Namespace.Name,
Labels: map[string]string{"app": "e2e-dns-configmap"},
GenerateName: "e2e-dns-configmap-",
Labels: map[string]string{"app": baseName},
GenerateName: baseName + "-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
@@ -205,10 +232,10 @@ func (t *dnsTestCommon) createUtilPod() {
},
ObjectMeta: metav1.ObjectMeta{
Namespace: t.f.Namespace.Name,
Name: "e2e-dns-configmap",
Name: baseName,
},
Spec: v1.ServiceSpec{
Selector: map[string]string{"app": "e2e-dns-configmap"},
Selector: map[string]string{"app": baseName},
Ports: []v1.ServicePort{
{
Protocol: "TCP",
@@ -232,6 +259,21 @@ func (t *dnsTestCommon) deleteUtilPod() {
}
}
// deleteCoreDNSPods manually deletes the CoreDNS pods to apply the changes to the ConfigMap.
func (t *dnsTestCommon) deleteCoreDNSPods() {
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"}))
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := t.f.ClientSet.CoreV1().Pods("kube-system").List(options)
podClient := t.c.CoreV1().Pods(metav1.NamespaceSystem)
for _, pod := range pods.Items {
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
}
}
func generateDNSServerPod(aRecords map[string]string) *v1.Pod {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
@@ -266,8 +308,8 @@ func generateDNSServerPod(aRecords map[string]string) *v1.Pod {
return pod
}
func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) {
t.dnsServerPod = generateDNSServerPod(aRecords)
func (t *dnsTestCommon) createDNSPodFromObj(pod *v1.Pod) {
t.dnsServerPod = pod
var err error
t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.dnsServerPod)
@@ -280,6 +322,40 @@ func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) {
Expect(err).NotTo(HaveOccurred())
}
func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) {
t.createDNSPodFromObj(generateDNSServerPod(aRecords))
}
func (t *dnsTestCommon) createDNSServerWithPtrRecord() {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "e2e-dns-configmap-dns-server-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dns",
Image: imageutils.GetE2EImage(imageutils.DNSMasq),
Command: []string{
"/usr/sbin/dnsmasq",
"-u", "root",
"-k",
"--log-facility", "-",
"--host-record=my.test,192.0.2.123",
"-q",
},
},
},
DNSPolicy: "Default",
},
}
t.createDNSPodFromObj(pod)
}
func (t *dnsTestCommon) deleteDNSServerPod() {
podClient := t.c.CoreV1().Pods(t.f.Namespace.Name)
if err := podClient.Delete(t.dnsServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
@@ -292,7 +368,7 @@ func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd, podHostName, servic
dnsPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "dns-test-" + string(uuid.NewUUID()),

View File

@@ -22,6 +22,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
@@ -33,11 +34,19 @@ type dnsFederationsConfigMapTest struct {
isValid bool
}
var (
googleDnsHostname = "google-public-dns-a.google.com"
// The ConfigMap update mechanism takes longer than the standard
// wait.ForeverTestTimeout.
moreForeverTestTimeout = 2 * 60 * time.Second
)
var _ = SIGDescribe("DNS configMap federations", func() {
t := &dnsNameserverTest{dnsTestCommon: newDnsTestCommon()}
BeforeEach(func() { t.c = t.f.ClientSet })
It("should be able to change federation configuration [Slow][Serial]", func() {
t.c = t.f.ClientSet
t.run()
})
})
@@ -46,7 +55,7 @@ func (t *dnsFederationsConfigMapTest) run() {
t.init()
defer t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil)
t.createUtilPod()
t.createUtilPodLabel("e2e-dns-configmap")
defer t.deleteUtilPod()
t.validate()
@@ -140,8 +149,10 @@ type dnsNameserverTest struct {
func (t *dnsNameserverTest) run() {
t.init()
t.createUtilPod()
t.createUtilPodLabel("e2e-dns-configmap")
defer t.deleteUtilPod()
originalConfigMapData := t.fetchDNSConfigMapData()
defer t.restoreDNSConfigMap(originalConfigMapData)
t.createDNSServer(map[string]string{
"abc.acme.local": "1.1.1.1",
@@ -150,46 +161,210 @@ func (t *dnsNameserverTest) run() {
})
defer t.deleteDNSServerPod()
t.setConfigMap(&v1.ConfigMap{Data: map[string]string{
"stubDomains": fmt.Sprintf(`{"acme.local":["%v"]}`, t.dnsServerPod.Status.PodIP),
"upstreamNameservers": fmt.Sprintf(`["%v"]`, t.dnsServerPod.Status.PodIP),
}})
if t.name == "coredns" {
t.setConfigMap(&v1.ConfigMap{Data: map[string]string{
"Corefile": fmt.Sprintf(`.:53 {
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
}
proxy . %v
}
acme.local:53 {
proxy . %v
}`, t.dnsServerPod.Status.PodIP, t.dnsServerPod.Status.PodIP),
}})
// The ConfigMap update mechanism takes longer than the standard
// wait.ForeverTestTimeout.
moreForeverTestTimeout := 2 * 60 * time.Second
t.deleteCoreDNSPods()
} else {
t.setConfigMap(&v1.ConfigMap{Data: map[string]string{
"stubDomains": fmt.Sprintf(`{"acme.local":["%v"]}`, t.dnsServerPod.Status.PodIP),
"upstreamNameservers": fmt.Sprintf(`["%v"]`, t.dnsServerPod.Status.PodIP),
}})
}
t.checkDNSRecordFrom(
"abc.acme.local",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "1.1.1.1" },
"dnsmasq",
"cluster-dns",
moreForeverTestTimeout)
t.checkDNSRecordFrom(
"def.acme.local",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "2.2.2.2" },
"dnsmasq",
"cluster-dns",
moreForeverTestTimeout)
t.checkDNSRecordFrom(
"widget.local",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "3.3.3.3" },
"dnsmasq",
"cluster-dns",
moreForeverTestTimeout)
t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil)
t.restoreDNSConfigMap(originalConfigMapData)
// Wait for the deleted ConfigMap to take effect, otherwise the
// configuration can bleed into other tests.
t.checkDNSRecordFrom(
"abc.acme.local",
func(actual []string) bool { return len(actual) == 0 },
"dnsmasq",
"cluster-dns",
moreForeverTestTimeout)
}
var _ = SIGDescribe("DNS configMap nameserver", func() {
t := &dnsNameserverTest{dnsTestCommon: newDnsTestCommon()}
BeforeEach(func() { t.c = t.f.ClientSet })
type dnsPtrFwdTest struct {
dnsTestCommon
}
It("should be able to change stubDomain configuration [Slow][Serial]", func() {
t.run()
func (t *dnsPtrFwdTest) run() {
t.init()
t.createUtilPodLabel("e2e-dns-configmap")
defer t.deleteUtilPod()
originalConfigMapData := t.fetchDNSConfigMapData()
defer t.restoreDNSConfigMap(originalConfigMapData)
t.createDNSServerWithPtrRecord()
defer t.deleteDNSServerPod()
// Should still be able to lookup public nameserver without explicit upstream nameserver set.
t.checkDNSRecordFrom(
"8.8.8.8.in-addr.arpa",
func(actual []string) bool { return len(actual) == 1 && actual[0] == googleDnsHostname+"." },
"cluster-dns",
moreForeverTestTimeout)
if t.name == "coredns" {
t.setConfigMap(&v1.ConfigMap{Data: map[string]string{
"Corefile": fmt.Sprintf(`.:53 {
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
}
proxy . %v
}`, t.dnsServerPod.Status.PodIP),
}})
t.deleteCoreDNSPods()
} else {
t.setConfigMap(&v1.ConfigMap{Data: map[string]string{
"upstreamNameservers": fmt.Sprintf(`["%v"]`, t.dnsServerPod.Status.PodIP),
}})
}
t.checkDNSRecordFrom(
"123.2.0.192.in-addr.arpa",
func(actual []string) bool { return len(actual) == 1 && actual[0] == "my.test." },
"cluster-dns",
moreForeverTestTimeout)
t.restoreDNSConfigMap(originalConfigMapData)
t.checkDNSRecordFrom(
"123.2.0.192.in-addr.arpa",
func(actual []string) bool { return len(actual) == 0 },
"cluster-dns",
moreForeverTestTimeout)
}
type dnsExternalNameTest struct {
dnsTestCommon
}
func (t *dnsExternalNameTest) run() {
t.init()
t.createUtilPodLabel("e2e-dns-configmap")
defer t.deleteUtilPod()
originalConfigMapData := t.fetchDNSConfigMapData()
defer t.restoreDNSConfigMap(originalConfigMapData)
fooHostname := "foo.example.com"
t.createDNSServer(map[string]string{
fooHostname: "192.0.2.123",
})
defer t.deleteDNSServerPod()
f := t.f
serviceName := "dns-externalname-upstream-test"
externalNameService := framework.CreateServiceSpec(serviceName, googleDnsHostname, false, nil)
if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService); err != nil {
Fail(fmt.Sprintf("Failed when creating service: %v", err))
}
serviceNameLocal := "dns-externalname-upstream-local"
externalNameServiceLocal := framework.CreateServiceSpec(serviceNameLocal, fooHostname, false, nil)
if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameServiceLocal); err != nil {
Fail(fmt.Sprintf("Failed when creating service: %v", err))
}
defer func() {
By("deleting the test externalName service")
defer GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameService.Name, nil)
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameServiceLocal.Name, nil)
}()
t.checkDNSRecordFrom(
fmt.Sprintf("%s.%s.svc.cluster.local", serviceName, f.Namespace.Name),
func(actual []string) bool {
return len(actual) >= 1 && actual[0] == googleDnsHostname+"."
},
"cluster-dns",
moreForeverTestTimeout)
if t.name == "coredns" {
t.setConfigMap(&v1.ConfigMap{Data: map[string]string{
"Corefile": fmt.Sprintf(`.:53 {
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
}
proxy . %v
}`, t.dnsServerPod.Status.PodIP),
}})
t.deleteCoreDNSPods()
} else {
t.setConfigMap(&v1.ConfigMap{Data: map[string]string{
"upstreamNameservers": fmt.Sprintf(`["%v"]`, t.dnsServerPod.Status.PodIP),
}})
}
t.checkDNSRecordFrom(
fmt.Sprintf("%s.%s.svc.cluster.local", serviceNameLocal, f.Namespace.Name),
func(actual []string) bool {
return len(actual) == 2 && actual[0] == fooHostname+"." && actual[1] == "192.0.2.123"
},
"cluster-dns",
moreForeverTestTimeout)
t.restoreDNSConfigMap(originalConfigMapData)
}
var _ = SIGDescribe("DNS configMap nameserver", func() {
Context("Change stubDomain", func() {
nsTest := &dnsNameserverTest{dnsTestCommon: newDnsTestCommon()}
It("should be able to change stubDomain configuration [Slow][Serial]", func() {
nsTest.c = nsTest.f.ClientSet
nsTest.run()
})
})
Context("Forward PTR lookup", func() {
fwdTest := &dnsPtrFwdTest{dnsTestCommon: newDnsTestCommon()}
It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func() {
fwdTest.c = fwdTest.f.ClientSet
fwdTest.run()
})
})
Context("Forward external name lookup", func() {
externalNameTest := &dnsExternalNameTest{dnsTestCommon: newDnsTestCommon()}
It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() {
externalNameTest.c = externalNameTest.f.ClientSet
externalNameTest.run()
})
})
})
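For CoreDNS-backed clusters, the stub-domain test above rewrites the entire Corefile instead of setting the kube-dns stubDomains/upstreamNameservers keys. The following standalone sketch only renders that Corefile with a placeholder upstream address to show what lands in the ConfigMap; renderStubDomainCorefile and the indentation are illustrative.

package main

import "fmt"

// renderStubDomainCorefile mirrors the Corefile written for CoreDNS by the
// stub-domain test above: cluster.local is answered by the kubernetes plugin,
// everything else is proxied upstream, and acme.local gets its own proxy block.
func renderStubDomainCorefile(upstream string) string {
	return fmt.Sprintf(`.:53 {
    kubernetes cluster.local in-addr.arpa ip6.arpa {
       pods insecure
       upstream
       fallthrough in-addr.arpa ip6.arpa
    }
    proxy . %v
}
acme.local:53 {
    proxy . %v
}`, upstream, upstream)
}

func main() {
	// Placeholder address; in the test this is the PodIP of the test DNS server.
	fmt.Println(renderStubDomainCorefile("198.51.100.10"))
}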

View File

@@ -0,0 +1,105 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"fmt"
"strconv"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
)
const (
parallelCreateServiceWorkers = 1
maxServicesPerCluster = 10000
checkServicePercent = 0.05
)
var _ = SIGDescribe("[Feature:PerformanceDNS]", func() {
f := framework.NewDefaultFramework("performancedns")
BeforeEach(func() {
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout))
framework.WaitForAllNodesHealthy(f.ClientSet, time.Minute)
err := framework.CheckTestingNSDeletedExcept(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
})
// answers dns for service - creates the maximum number of services, and then check dns record for one
It("Should answer DNS query for maximum number of services per cluster", func() {
services := generateServicesInNamespace(f.Namespace.Name, maxServicesPerCluster)
createService := func(i int) {
defer GinkgoRecover()
framework.ExpectNoError(testutils.CreateServiceWithRetries(f.ClientSet, f.Namespace.Name, services[i]))
}
framework.Logf("Creating %v test services", maxServicesPerCluster)
workqueue.Parallelize(parallelCreateServiceWorkers, len(services), createService)
dnsTest := dnsTestCommon{
f: f,
c: f.ClientSet,
ns: f.Namespace.Name,
}
dnsTest.createUtilPodLabel("e2e-dns-scale-records")
defer dnsTest.deleteUtilPod()
framework.Logf("Querying %v%% of service records", checkServicePercent*100)
for i := 0; i < len(services); i++ {
if i%(1/checkServicePercent) != 0 {
continue
}
s := services[i]
svc, err := f.ClientSet.CoreV1().Services(s.Namespace).Get(s.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
qname := fmt.Sprintf("%v.%v.svc.cluster.local", s.Name, s.Namespace)
framework.Logf("Querying %v expecting %v", qname, svc.Spec.ClusterIP)
dnsTest.checkDNSRecordFrom(
qname,
func(actual []string) bool {
return len(actual) == 1 && actual[0] == svc.Spec.ClusterIP
},
"cluster-dns",
wait.ForeverTestTimeout,
)
}
})
})
func generateServicesInNamespace(namespace string, num int) []*v1.Service {
services := make([]*v1.Service, num)
for i := 0; i < num; i++ {
services[i] = &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "svc-" + strconv.Itoa(i),
Namespace: namespace,
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: 80,
}},
},
}
}
return services
}
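One detail of the sampling loop in the new scale test: checkServicePercent is an untyped constant, so 1/checkServicePercent is evaluated exactly at compile time as 20 and the loop queries every 20th service. A standalone illustration using the same constants follows; nothing in it is part of the commit.

package main

import "fmt"

const (
	maxServicesPerCluster = 10000
	checkServicePercent   = 0.05
)

func main() {
	// 1/checkServicePercent is a constant expression equal to 20, so it can be
	// used directly as the modulus: only every 20th service index is queried.
	queried := 0
	for i := 0; i < maxServicesPerCluster; i++ {
		if i%(1/checkServicePercent) != 0 {
			continue
		}
		queried++
	}
	fmt.Printf("querying %d of %d services (%.0f%%)\n", queried, maxServicesPerCluster, checkServicePercent*100)
}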

View File

@@ -31,6 +31,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
@@ -40,12 +41,6 @@ import (
. "github.com/onsi/gomega"
)
const (
NEGAnnotation = "alpha.cloud.google.com/load-balancer-neg"
NEGUpdateTimeout = 2 * time.Minute
instanceGroupAnnotation = "ingress.gcp.kubernetes.io/instance-groups"
)
var _ = SIGDescribe("Loadbalancing: L7", func() {
defer GinkgoRecover()
var (
@@ -125,18 +120,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// ip released when the rest of lb resources are deleted in CleanupGCEIngressController
ip := gceController.CreateStaticIP(ns)
By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", ns, ip))
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "static-ip"), ns, map[string]string{
framework.IngressStaticIPKey: ns,
framework.IngressAllowHTTPKey: "false",
}, map[string]string{})
By("waiting for Ingress to come up with ip: " + ip)
httpClient := framework.BuildInsecureClient(framework.IngressReqTimeout)
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false))
By("should reject HTTP traffic")
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%v/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true))
executeStaticIPHttpsOnlyTest(f, jig, ns, ip)
By("should have correct firewall rule for ingress")
fw := gceController.GetFirewallRule()
@@ -320,80 +304,47 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
})
It("should create ingress with pre-shared certificate", func() {
preSharedCertName := "test-pre-shared-cert"
By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName))
testHostname := "test.ingress.com"
cert, key, err := framework.GenerateRSACerts(testHostname, true)
Expect(err).NotTo(HaveOccurred())
gceCloud, err := framework.GetGCECloud()
Expect(err).NotTo(HaveOccurred())
defer func() {
// We would not be able to delete the cert until ingress controller
// cleans up the target proxy that references it.
By("Deleting ingress before deleting ssl certificate")
if jig.Ingress != nil {
jig.TryDeleteIngress()
}
By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName))
err := wait.Poll(framework.LoadBalancerPollInterval, framework.LoadBalancerCleanupTimeout, func() (bool, error) {
if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !errors.IsNotFound(err) {
framework.Logf("Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err)
return false, nil
}
return true, nil
})
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to delete ssl certificate %q: %v", preSharedCertName, err))
}()
_, err = gceCloud.CreateSslCertificate(&compute.SslCertificate{
Name: preSharedCertName,
Certificate: string(cert),
PrivateKey: string(key),
Description: "pre-shared cert for ingress testing",
})
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create ssl certificate %q: %v", preSharedCertName, err))
By("Creating an ingress referencing the pre-shared certificate")
// Create an ingress referencing this cert using pre-shared-cert annotation.
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "pre-shared-cert"), ns, map[string]string{
framework.IngressPreSharedCertKey: preSharedCertName,
framework.IngressAllowHTTPKey: "false",
}, map[string]string{})
By("Test that ingress works with the pre-shared certificate")
err = jig.WaitForIngressWithCert(true, []string{testHostname}, cert)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
executePresharedCertTest(f, jig, "")
})
It("should create ingress with backside re-encryption", func() {
By("Creating a set of ingress, service and deployment that have backside re-encryption configured")
deployCreated, svcCreated, ingCreated, err := framework.CreateReencryptionIngress(f.ClientSet, f.Namespace.Name)
defer func() {
By("Cleaning up re-encryption ingress, service and deployment")
if errs := framework.CleanupReencryptionIngress(f.ClientSet, deployCreated, svcCreated, ingCreated); len(errs) > 0 {
framework.Failf("Failed to cleanup re-encryption ingress: %v", errs)
}
}()
Expect(err).NotTo(HaveOccurred(), "Failed to create re-encryption ingress")
It("should create ingress with backend HTTPS", func() {
executeBacksideBacksideHTTPSTest(f, jig, "")
})
By(fmt.Sprintf("Waiting for ingress %s to come up", ingCreated.Name))
ingIP, err := jig.WaitForIngressAddress(f.ClientSet, f.Namespace.Name, ingCreated.Name, framework.LoadBalancerPollTimeout)
Expect(err).NotTo(HaveOccurred(), "Failed to wait for ingress IP")
It("should support multiple TLS certs", func() {
By("Creating an ingress with no certs.")
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "multiple-certs"), ns, map[string]string{
framework.IngressStaticIPKey: ns,
}, map[string]string{})
By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP))
timeoutClient := &http.Client{Timeout: framework.IngressReqTimeout}
err = wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) {
resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "")
if err != nil {
framework.Logf("SimpleGET failed: %v", err)
return false, nil
}
if !strings.Contains(resp, "request_scheme=https") {
return false, fmt.Errorf("request wasn't served by HTTPS, response body: %s", resp)
}
framework.Logf("Poll succeeded, request was served by HTTPS")
return true, nil
})
Expect(err).NotTo(HaveOccurred(), "Failed to verify backside re-encryption ingress")
By("Adding multiple certs to the ingress.")
hosts := []string{"test1.ingress.com", "test2.ingress.com", "test3.ingress.com", "test4.ingress.com"}
secrets := []string{"tls-secret-1", "tls-secret-2", "tls-secret-3", "tls-secret-4"}
certs := [][]byte{}
for i, host := range hosts {
jig.AddHTTPS(secrets[i], host)
certs = append(certs, jig.GetRootCA(secrets[i]))
}
for i, host := range hosts {
err := jig.WaitForIngressWithCert(true, []string{host}, certs[i])
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
}
By("Remove all but one of the certs on the ingress.")
jig.RemoveHTTPS(secrets[1])
jig.RemoveHTTPS(secrets[2])
jig.RemoveHTTPS(secrets[3])
By("Test that the remaining cert is properly served.")
err := jig.WaitForIngressWithCert(true, []string{hosts[0]}, certs[0])
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
By("Add back one of the certs that was removed and check that all certs are served.")
jig.AddHTTPS(secrets[1], hosts[1])
for i, host := range hosts[:2] {
err := jig.WaitForIngressWithCert(true, []string{host}, certs[i])
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
}
})
It("multicluster ingress should get instance group annotation", func() {
@@ -402,23 +353,104 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
framework.IngressClassKey: framework.MulticlusterIngressClassValue,
}, map[string]string{})
By(fmt.Sprintf("waiting for Ingress %s to come up", name))
By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name))
pollErr := wait.Poll(2*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
ing, err := f.ClientSet.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
framework.ExpectNoError(err)
annotations := ing.Annotations
if annotations == nil || annotations[instanceGroupAnnotation] == "" {
framework.Logf("Waiting for ingress to get %s annotation. Found annotations: %v", instanceGroupAnnotation, annotations)
if annotations == nil || annotations[framework.InstanceGroupAnnotation] == "" {
framework.Logf("Waiting for ingress to get %s annotation. Found annotations: %v", framework.InstanceGroupAnnotation, annotations)
return false, nil
}
return true, nil
})
if pollErr != nil {
framework.ExpectNoError(fmt.Errorf("Timed out waiting for ingress %s to get %s annotation", name, instanceGroupAnnotation))
framework.ExpectNoError(fmt.Errorf("Timed out waiting for ingress %s to get %s annotation", name, framework.InstanceGroupAnnotation))
}
// Verify that the ingress does not get other annotations like url-map, target-proxy, backends, etc.
// Note: All resources except the firewall rule have an annotation.
umKey := framework.StatusPrefix + "/url-map"
fwKey := framework.StatusPrefix + "/forwarding-rule"
tpKey := framework.StatusPrefix + "/target-proxy"
fwsKey := framework.StatusPrefix + "/https-forwarding-rule"
tpsKey := framework.StatusPrefix + "/https-target-proxy"
scKey := framework.StatusPrefix + "/ssl-cert"
beKey := framework.StatusPrefix + "/backends"
wait.Poll(2*time.Second, time.Minute, func() (bool, error) {
ing, err := f.ClientSet.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
framework.ExpectNoError(err)
annotations := ing.Annotations
if annotations != nil && (annotations[umKey] != "" || annotations[fwKey] != "" ||
annotations[tpKey] != "" || annotations[fwsKey] != "" || annotations[tpsKey] != "" ||
annotations[scKey] != "" || annotations[beKey] != "") {
framework.Failf("unexpected annotations. Expected to not have annotations for urlmap, forwarding rule, target proxy, ssl cert and backends, got: %v", annotations)
return true, nil
}
return false, nil
})
// Verify that the controller does not create any other resource except instance group.
// TODO(59778): Check GCE resources specific to this ingress instead of listing all resources.
if len(gceController.ListUrlMaps()) != 0 {
framework.Failf("unexpected url maps, expected none, got: %v", gceController.ListUrlMaps())
}
if len(gceController.ListGlobalForwardingRules()) != 0 {
framework.Failf("unexpected forwarding rules, expected none, got: %v", gceController.ListGlobalForwardingRules())
}
if len(gceController.ListTargetHttpProxies()) != 0 {
framework.Failf("unexpected target http proxies, expected none, got: %v", gceController.ListTargetHttpProxies())
}
if len(gceController.ListTargetHttpsProxies()) != 0 {
framework.Failf("unexpected target https proxies, expected none, got: %v", gceController.ListTargetHttpProxies())
}
if len(gceController.ListSslCertificates()) != 0 {
framework.Failf("unexpected ssl certificates, expected none, got: %v", gceController.ListSslCertificates())
}
if len(gceController.ListGlobalBackendServices()) != 0 {
framework.Failf("unexpected backend service, expected none, got: %v", gceController.ListGlobalBackendServices())
}
// Controller does not have a list command for firewall rule. We use get instead.
if fw, err := gceController.GetFirewallRuleOrError(); err == nil {
framework.Failf("unexpected nil error in getting firewall rule, expected firewall NotFound, got firewall: %v", fw)
}
// TODO(nikhiljindal): Check the instance group annotation value and verify with a multizone cluster.
})
It("should be able to switch between HTTPS and HTTP2 modes", func() {
httpsScheme := "request_scheme=https"
By("Create a basic HTTP2 ingress")
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http2"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
address, err := jig.WaitForIngressAddress(jig.Client, jig.Ingress.Namespace, jig.Ingress.Name, framework.LoadBalancerPollTimeout)
By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTP2", address))
detectHttpVersionAndSchemeTest(f, jig, address, "request_version=2", httpsScheme)
By("Switch backend service to use HTTPS")
svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, svc := range svcList.Items {
svc.Annotations[framework.ServiceApplicationProtocolKey] = `{"http2":"HTTPS"}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred())
}
detectHttpVersionAndSchemeTest(f, jig, address, "request_version=1.1", httpsScheme)
By("Switch backend service to use HTTP2")
svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, svc := range svcList.Items {
svc.Annotations[framework.ServiceApplicationProtocolKey] = `{"http2":"HTTP2"}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred())
}
detectHttpVersionAndSchemeTest(f, jig, address, "request_version=2", httpsScheme)
})
// TODO: Implement a multizone e2e that verifies traffic reaches each
// zone based on pod labels.
})
@@ -458,14 +490,14 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
It("should conform to Ingress spec", func() {
jig.PollInterval = 5 * time.Second
conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{
NEGAnnotation: "true",
framework.NEGAnnotation: "true",
})
for _, t := range conformanceTests {
By(t.EntryLog)
t.Execute()
By(t.ExitLog)
jig.WaitForIngress(true)
usingNeg, err := gceController.BackendServiceUsingNEG(jig.GetIngressNodePorts(false))
usingNeg, err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
Expect(err).NotTo(HaveOccurred())
Expect(usingNeg).To(BeTrue())
}
@@ -476,7 +508,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetIngressNodePorts(false))
usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
Expect(err).NotTo(HaveOccurred())
Expect(usingNEG).To(BeTrue())
@@ -484,12 +516,12 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, svc := range svcList.Items {
svc.Annotations[NEGAnnotation] = "false"
svc.Annotations[framework.NEGAnnotation] = "false"
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred())
}
wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
return gceController.BackendServiceUsingIG(jig.GetIngressNodePorts(true))
return gceController.BackendServiceUsingIG(jig.GetServicePorts(true))
})
jig.WaitForIngress(true)
@@ -497,16 +529,32 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, svc := range svcList.Items {
svc.Annotations[NEGAnnotation] = "true"
svc.Annotations[framework.NEGAnnotation] = "true"
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred())
}
wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
return gceController.BackendServiceUsingNEG(jig.GetIngressNodePorts(false))
return gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
})
jig.WaitForIngress(true)
})
It("should be able to create a ClusterIP service [Unreleased]", func() {
var err error
By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
svcPorts := jig.GetServicePorts(false)
usingNEG, err := gceController.BackendServiceUsingNEG(svcPorts)
Expect(err).NotTo(HaveOccurred())
Expect(usingNEG).To(BeTrue())
// ClusterIP ServicePorts have no NodePort
for _, sp := range svcPorts {
Expect(sp.NodePort).To(Equal(int32(0)))
}
})
It("should sync endpoints to NEG", func() {
name := "hostname"
scaleAndValidateNEG := func(num int) {
@ -517,7 +565,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(ns).UpdateScale(name, scale)
Expect(err).NotTo(HaveOccurred())
}
wait.Poll(10*time.Second, NEGUpdateTimeout, func() (bool, error) {
wait.Poll(10*time.Second, framework.NEGUpdateTimeout, func() (bool, error) {
res, err := jig.GetDistinctResponseFromIngress()
if err != nil {
return false, nil
@@ -529,7 +577,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetIngressNodePorts(false))
usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
Expect(err).NotTo(HaveOccurred())
Expect(usingNEG).To(BeTrue())
// initial replicas number is 1
@@ -554,7 +602,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetIngressNodePorts(false))
usingNEG, err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
Expect(err).NotTo(HaveOccurred())
Expect(usingNEG).To(BeTrue())
@@ -602,10 +650,29 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
})
Describe("GCE [Slow] [Feature:kubemci]", func() {
var gceController *framework.GCEIngressController
var ipName, ipAddress string
// Platform specific setup
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
jig.Class = framework.MulticlusterIngressClassValue
jig.PollInterval = 5 * time.Second
By("Initializing gce controller")
gceController = &framework.GCEIngressController{
Ns: ns,
Client: jig.Client,
Cloud: framework.TestContext.CloudConfig,
}
err := gceController.Init()
Expect(err).NotTo(HaveOccurred())
// TODO(https://github.com/GoogleCloudPlatform/k8s-multicluster-ingress/issues/19):
// Kubemci should reserve a static ip if user has not specified one.
ipName = "kubemci-" + string(uuid.NewUUID())
// ip released when the rest of lb resources are deleted in CleanupGCEIngressController
ipAddress = gceController.CreateStaticIP(ipName)
By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", ipName, ipAddress))
})
// Platform specific cleanup
@@ -615,22 +682,99 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
}
if jig.Ingress == nil {
By("No ingress created, no cleanup necessary")
return
} else {
By("Deleting ingress")
jig.TryDeleteIngress()
}
By("Deleting ingress")
jig.TryDeleteIngress()
By("Cleaning up cloud resources")
Expect(gceController.CleanupGCEIngressController()).NotTo(HaveOccurred())
})
It("should conform to Ingress spec", func() {
jig.PollInterval = 5 * time.Second
conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{})
conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{
framework.IngressStaticIPKey: ipName,
})
for _, t := range conformanceTests {
By(t.EntryLog)
t.Execute()
By(t.ExitLog)
jig.WaitForIngress(true /*waitForNodePort*/)
jig.WaitForIngress(false /*waitForNodePort*/)
}
})
It("should create ingress with pre-shared certificate", func() {
executePresharedCertTest(f, jig, ipName)
})
It("should create ingress with backend HTTPS", func() {
executeBacksideBacksideHTTPSTest(f, jig, ipName)
})
It("should support https-only annotation", func() {
executeStaticIPHttpsOnlyTest(f, jig, ipName, ipAddress)
})
It("should remove clusters as expected", func() {
ingAnnotations := map[string]string{
framework.IngressStaticIPKey: ipName,
}
ingFilePath := filepath.Join(framework.IngressManifestPath, "http")
jig.CreateIngress(ingFilePath, ns, ingAnnotations, map[string]string{})
jig.WaitForIngress(false /*waitForNodePort*/)
name := jig.Ingress.Name
// Verify that the ingress is spread to 1 cluster as expected.
verifyKubemciStatusHas(name, "is spread across 1 cluster")
// Validate that removing the ingress from all clusters throws an error.
// Reuse the ingress file created while creating the ingress.
filePath := filepath.Join(framework.TestContext.OutputDir, "mci.yaml")
output, err := framework.RunKubemciWithKubeconfig("remove-clusters", name, "--ingress="+filePath)
if err != nil {
framework.Failf("unexpected error in running kubemci remove-clusters command to remove from all clusters: %s", err)
}
if !strings.Contains(output, "You should use kubemci delete to delete the ingress completely") {
framework.Failf("unexpected output in removing an ingress from all clusters, expected the output to include: You should use kubemci delete to delete the ingress completely, actual output: %s", output)
}
// Verify that the ingress is still spread to 1 cluster as expected.
verifyKubemciStatusHas(name, "is spread across 1 cluster")
// remove-clusters should succeed with --force=true
if _, err := framework.RunKubemciWithKubeconfig("remove-clusters", name, "--ingress="+filePath, "--force=true"); err != nil {
framework.Failf("unexpected error in running kubemci remove-clusters to remove from all clusters with --force=true: %s", err)
}
verifyKubemciStatusHas(name, "is spread across 0 cluster")
})
It("single and multi-cluster ingresses should be able to exist together", func() {
By("Creating a single cluster ingress first")
jig.Class = ""
singleIngFilePath := filepath.Join(framework.IngressManifestPath, "static-ip-2")
jig.CreateIngress(singleIngFilePath, ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(false /*waitForNodePort*/)
// jig.Ingress will be overwritten when we create MCI, so keep a reference.
singleIng := jig.Ingress
// Create the multi-cluster ingress next.
By("Creating a multi-cluster ingress next")
jig.Class = framework.MulticlusterIngressClassValue
ingAnnotations := map[string]string{
framework.IngressStaticIPKey: ipName,
}
multiIngFilePath := filepath.Join(framework.IngressManifestPath, "http")
jig.CreateIngress(multiIngFilePath, ns, ingAnnotations, map[string]string{})
jig.WaitForIngress(false /*waitForNodePort*/)
mciIngress := jig.Ingress
By("Deleting the single cluster ingress and verifying that multi-cluster ingress continues to work")
jig.Ingress = singleIng
jig.Class = ""
jig.TryDeleteIngress()
jig.Ingress = mciIngress
jig.Class = framework.MulticlusterIngressClassValue
jig.WaitForIngress(false /*waitForNodePort*/)
By("Cleanup: Deleting the multi-cluster ingress")
jig.TryDeleteIngress()
})
})
// Time: borderline 5m, slow by design
@@ -684,3 +828,133 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
})
})
})
// verifyKubemciStatusHas fails if kubemci get-status output for the given mci does not have the given expectedSubStr.
func verifyKubemciStatusHas(name, expectedSubStr string) {
statusStr, err := framework.RunKubemciCmd("get-status", name)
if err != nil {
framework.Failf("unexpected error in running kubemci get-status %s: %s", name, err)
}
if !strings.Contains(statusStr, expectedSubStr) {
framework.Failf("expected status to have sub string %s, actual status: %s", expectedSubStr, statusStr)
}
}
func executePresharedCertTest(f *framework.Framework, jig *framework.IngressTestJig, staticIPName string) {
preSharedCertName := "test-pre-shared-cert"
By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName))
testHostname := "test.ingress.com"
cert, key, err := framework.GenerateRSACerts(testHostname, true)
Expect(err).NotTo(HaveOccurred())
gceCloud, err := framework.GetGCECloud()
Expect(err).NotTo(HaveOccurred())
defer func() {
// We would not be able to delete the cert until ingress controller
// cleans up the target proxy that references it.
By("Deleting ingress before deleting ssl certificate")
if jig.Ingress != nil {
jig.TryDeleteIngress()
}
By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName))
err := wait.Poll(framework.LoadBalancerPollInterval, framework.LoadBalancerCleanupTimeout, func() (bool, error) {
if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !errors.IsNotFound(err) {
framework.Logf("Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err)
return false, nil
}
return true, nil
})
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to delete ssl certificate %q: %v", preSharedCertName, err))
}()
_, err = gceCloud.CreateSslCertificate(&compute.SslCertificate{
Name: preSharedCertName,
Certificate: string(cert),
PrivateKey: string(key),
Description: "pre-shared cert for ingress testing",
})
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create ssl certificate %q: %v", preSharedCertName, err))
By("Creating an ingress referencing the pre-shared certificate")
// Create an ingress referencing this cert using pre-shared-cert annotation.
ingAnnotations := map[string]string{
framework.IngressPreSharedCertKey: preSharedCertName,
// Disallow HTTP to save resources. This is irrelevant to the
// pre-shared cert test.
framework.IngressAllowHTTPKey: "false",
}
if staticIPName != "" {
ingAnnotations[framework.IngressStaticIPKey] = staticIPName
}
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "pre-shared-cert"), f.Namespace.Name, ingAnnotations, map[string]string{})
By("Test that ingress works with the pre-shared certificate")
err = jig.WaitForIngressWithCert(true, []string{testHostname}, cert)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
}
func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *framework.IngressTestJig, ipName, ip string) {
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "static-ip"), f.Namespace.Name, map[string]string{
framework.IngressStaticIPKey: ipName,
framework.IngressAllowHTTPKey: "false",
}, map[string]string{})
By("waiting for Ingress to come up with ip: " + ip)
httpClient := framework.BuildInsecureClient(framework.IngressReqTimeout)
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false))
By("should reject HTTP traffic")
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true))
}
func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *framework.IngressTestJig, staticIPName string) {
By("Creating a set of ingress, service and deployment that have backside re-encryption configured")
deployCreated, svcCreated, ingCreated, err := jig.SetUpBacksideHTTPSIngress(f.ClientSet, f.Namespace.Name, staticIPName)
defer func() {
By("Cleaning up re-encryption ingress, service and deployment")
if errs := jig.DeleteTestResource(f.ClientSet, deployCreated, svcCreated, ingCreated); len(errs) > 0 {
framework.Failf("Failed to cleanup re-encryption ingress: %v", errs)
}
}()
Expect(err).NotTo(HaveOccurred(), "Failed to create re-encryption ingress")
By(fmt.Sprintf("Waiting for ingress %s to come up", ingCreated.Name))
ingIP, err := jig.WaitForIngressAddress(f.ClientSet, f.Namespace.Name, ingCreated.Name, framework.LoadBalancerPollTimeout)
Expect(err).NotTo(HaveOccurred(), "Failed to wait for ingress IP")
By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP))
timeoutClient := &http.Client{Timeout: framework.IngressReqTimeout}
err = wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) {
resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "")
if err != nil {
framework.Logf("SimpleGET failed: %v", err)
return false, nil
}
if !strings.Contains(resp, "request_scheme=https") {
return false, fmt.Errorf("request wasn't served by HTTPS, response body: %s", resp)
}
framework.Logf("Poll succeeded, request was served by HTTPS")
return true, nil
})
Expect(err).NotTo(HaveOccurred(), "Failed to verify backside re-encryption ingress")
}
func detectHttpVersionAndSchemeTest(f *framework.Framework, jig *framework.IngressTestJig, address, version, scheme string) {
timeoutClient := &http.Client{Timeout: framework.IngressReqTimeout}
resp := ""
err := wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) {
resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", address), "")
if err != nil {
framework.Logf("SimpleGET failed: %v", err)
return false, nil
}
if !strings.Contains(resp, version) {
framework.Logf("Waiting for transition to HTTP/2")
return false, nil
}
if !strings.Contains(resp, scheme) {
return false, nil
}
framework.Logf("Poll succeeded, request was served by HTTP2")
return true, nil
})
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get %s or %s, response body: %s", version, scheme, resp))
}
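The ingress changes above rely throughout on wait.Poll and wait.PollImmediate to retry HTTP checks until the load balancer converges. The following is an illustrative stand-in for that polling pattern, not the real k8s.io/apimachinery implementation; it only mirrors the check-immediately-then-retry-until-timeout behavior the tests depend on.

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollImmediate runs condition right away, then retries every interval until
// it returns true, returns an error, or the timeout expires.
func pollImmediate(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := condition()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	attempts := 0
	err := pollImmediate(50*time.Millisecond, time.Second, func() (bool, error) {
		attempts++
		// Pretend the backend only starts answering as expected on the third poll.
		return attempts >= 3, nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}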

View File

@@ -53,7 +53,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
}
for _, lb := range serviceLBNames {
framework.Logf("cleaning gce resource for %s", lb)
framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Zone)
framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
}
//reset serviceLBNames
serviceLBNames = []string{}
@@ -73,7 +73,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
By("creating a Service of type LoadBalancer using the standard network tier")
svc := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer
setNetworkTier(svc, gcecloud.NetworkTierAnnotationStandard.ToGCEValue())
setNetworkTier(svc, string(gcecloud.NetworkTierAnnotationStandard))
})
// Verify that service has been updated properly.
svcTier, err := gcecloud.GetServiceNetworkTier(svc)
@@ -120,7 +120,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
By("updating the Service to use the standard tier with a requested IP")
svc = jig.UpdateServiceOrFail(ns, svc.Name, func(svc *v1.Service) {
svc.Spec.LoadBalancerIP = requestedIP
setNetworkTier(svc, gcecloud.NetworkTierAnnotationStandard.ToGCEValue())
setNetworkTier(svc, string(gcecloud.NetworkTierAnnotationStandard))
})
// Verify that service has been updated properly.
Expect(svc.Spec.LoadBalancerIP).To(Equal(requestedIP))

View File

@@ -32,7 +32,6 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/net"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -53,7 +52,7 @@ const (
)
var _ = SIGDescribe("Proxy", func() {
version := testapi.Groups[v1.GroupName].GroupVersion().Version
version := "v1"
Context("version "+version, func() {
options := framework.FrameworkOptions{
ClientQPS: -1.0,
@@ -74,7 +73,6 @@ var _ = SIGDescribe("Proxy", func() {
subresource.
*/
framework.ConformanceIt("should proxy logs on node using proxy subresource ", func() { nodeProxyTest(f, prefix+"/nodes/", "/proxy/logs/") })
It("should proxy to cadvisor using proxy subresource", func() { nodeProxyTest(f, prefix+"/nodes/", ":4194/proxy/containers/") })
// using the porter image to serve content, access the content
// (of multiple pods?) from multiple (endpoints/services?)
@@ -163,7 +161,7 @@ var _ = SIGDescribe("Proxy", func() {
CreatedPods: &pods,
}
Expect(framework.RunRC(cfg)).NotTo(HaveOccurred())
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, cfg.Name)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, cfg.Name)
Expect(framework.WaitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name)).NotTo(HaveOccurred())

View File

@@ -179,25 +179,27 @@ func (f *IngressScaleFramework) RunScaleTest() []error {
}
}
// currentNum keeps track of how many ingresses have been created.
currentNum := new(int)
// numIngsCreated keeps track of how many ingresses have been created.
numIngsCreated := 0
prepareIngsFunc := func(goalNum int) {
prepareIngsFunc := func(numIngsNeeded int) {
var ingWg sync.WaitGroup
numToCreate := goalNum - *currentNum
ingWg.Add(numToCreate)
errQueue := make(chan error, numToCreate)
latencyQueue := make(chan time.Duration, numToCreate)
numIngsToCreate := numIngsNeeded - numIngsCreated
ingWg.Add(numIngsToCreate)
svcQueue := make(chan *v1.Service, numIngsToCreate)
ingQueue := make(chan *extensions.Ingress, numIngsToCreate)
errQueue := make(chan error, numIngsToCreate)
latencyQueue := make(chan time.Duration, numIngsToCreate)
start := time.Now()
for ; *currentNum < goalNum; *currentNum++ {
suffix := fmt.Sprintf("%d", *currentNum)
for ; numIngsCreated < numIngsNeeded; numIngsCreated++ {
suffix := fmt.Sprintf("%d", numIngsCreated)
go func() {
defer ingWg.Done()
start := time.Now()
svcCreated, ingCreated, err := f.createScaleTestServiceIngress(suffix, f.EnableTLS)
f.ScaleTestSvcs = append(f.ScaleTestSvcs, svcCreated)
f.ScaleTestIngs = append(f.ScaleTestIngs, ingCreated)
svcQueue <- svcCreated
ingQueue <- ingCreated
if err != nil {
errQueue <- err
return
@@ -214,11 +216,19 @@ func (f *IngressScaleFramework) RunScaleTest() []error {
}
// Wait until all ingress creations are complete.
f.Logger.Infof("Waiting for %d ingresses to come up...", numToCreate)
f.Logger.Infof("Waiting for %d ingresses to come up...", numIngsToCreate)
ingWg.Wait()
close(svcQueue)
close(ingQueue)
close(errQueue)
close(latencyQueue)
elapsed := time.Since(start)
for svc := range svcQueue {
f.ScaleTestSvcs = append(f.ScaleTestSvcs, svc)
}
for ing := range ingQueue {
f.ScaleTestIngs = append(f.ScaleTestIngs, ing)
}
var createLatencies []time.Duration
for latency := range latencyQueue {
createLatencies = append(createLatencies, latency)
@@ -231,15 +241,15 @@ func (f *IngressScaleFramework) RunScaleTest() []error {
}
return
}
f.Logger.Infof("Spent %s for %d ingresses to come up", elapsed, numToCreate)
f.Logger.Infof("Spent %s for %d ingresses to come up", elapsed, numIngsToCreate)
f.BatchDurations = append(f.BatchDurations, elapsed)
}
measureCreateUpdateFunc := func() {
f.Logger.Infof("Create one more ingress and wait for it to come up")
start := time.Now()
svcCreated, ingCreated, err := f.createScaleTestServiceIngress(fmt.Sprintf("%d", *currentNum), f.EnableTLS)
*currentNum = *currentNum + 1
svcCreated, ingCreated, err := f.createScaleTestServiceIngress(fmt.Sprintf("%d", numIngsCreated), f.EnableTLS)
numIngsCreated = numIngsCreated + 1
f.ScaleTestSvcs = append(f.ScaleTestSvcs, svcCreated)
f.ScaleTestIngs = append(f.ScaleTestIngs, ingCreated)
if err != nil {
@@ -440,7 +450,7 @@ func generateScaleTestBackendDeploymentSpec(numReplicas int32) *extensions.Deplo
Containers: []v1.Container{
{
Name: scaleTestBackendName,
Image: "gcr.io/google_containers/echoserver:1.6",
Image: "k8s.gcr.io/echoserver:1.10",
Ports: []v1.ContainerPort{{ContainerPort: 8080}},
ReadinessProbe: &v1.Probe{
Handler: v1.Handler{
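
The scale-framework change above stops appending to f.ScaleTestSvcs and f.ScaleTestIngs from inside the worker goroutines and instead sends created objects over buffered channels (svcQueue, ingQueue) that are drained only after ingWg.Wait(), which avoids unsynchronized appends to shared slices. A minimal standalone sketch of that collect-via-channel pattern, with illustrative names:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const numWorkers = 5

	var wg sync.WaitGroup
	wg.Add(numWorkers)
	// Buffer the channel for one result per producer so workers never block.
	results := make(chan string, numWorkers)

	for i := 0; i < numWorkers; i++ {
		go func(i int) {
			defer wg.Done()
			// Send instead of appending to a shared slice from the goroutine.
			results <- fmt.Sprintf("ing-%d", i)
		}(i)
	}

	// Wait for every producer, then close and drain the channel in one place.
	wg.Wait()
	close(results)

	var created []string
	for r := range results {
		created = append(created, r)
	}
	fmt.Println(created)
}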

View File

@@ -106,11 +106,7 @@ func main() {
}
// Initializing a GCE client.
gceAlphaFeatureGate, err := gcecloud.NewAlphaFeatureGate([]string{})
if err != nil {
glog.Errorf("Encountered error for creating alpha feature gate: %v", err)
os.Exit(1)
}
gceAlphaFeatureGate := gcecloud.NewAlphaFeatureGate([]string{})
gceCloud, err := gcecloud.CreateGCECloud(&gcecloud.CloudConfig{
ProjectID: cloudConfig.ProjectID,
Region: cloudConfig.Region,

View File

@@ -43,6 +43,36 @@ import (
. "github.com/onsi/gomega"
)
const (
defaultServeHostnameServicePort = 80
defaultServeHostnameServiceName = "svc-hostname"
)
var (
defaultServeHostnameService = v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: defaultServeHostnameServiceName,
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: int32(defaultServeHostnameServicePort),
TargetPort: intstr.FromInt(9376),
Protocol: "TCP",
}},
Selector: map[string]string{
"name": defaultServeHostnameServiceName,
},
},
}
)
func getServeHostnameService(name string) *v1.Service {
svc := defaultServeHostnameService.DeepCopy()
svc.ObjectMeta.Name = name
svc.Spec.Selector["name"] = name
return svc
}
var _ = SIGDescribe("Services", func() {
f := framework.NewDefaultFramework("services")
@@ -61,7 +91,7 @@ var _ = SIGDescribe("Services", func() {
}
for _, lb := range serviceLBNames {
framework.Logf("cleaning gce resource for %s", lb)
framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Zone)
framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
}
//reset serviceLBNames
serviceLBNames = []string{}
@@ -84,9 +114,9 @@ var _ = SIGDescribe("Services", func() {
valid/accessible endpoints (same port number for service and pods).
*/
framework.ConformanceIt("should serve a basic endpoint from pods ", func() {
// TODO: use the ServiceTestJig here
serviceName := "endpoint-test2"
ns := f.Namespace.Name
jig := framework.NewServiceTestJig(cs, serviceName)
labels := map[string]string{
"foo": "bar",
"baz": "blah",
@@ -97,20 +127,12 @@ var _ = SIGDescribe("Services", func() {
err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
Expect(err).NotTo(HaveOccurred())
}()
ports := []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}}
_, err := jig.CreateServiceWithServicePort(labels, ns, ports)
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
},
Spec: v1.ServiceSpec{
Selector: labels,
Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}},
},
}
_, err := cs.CoreV1().Services(ns).Create(service)
Expect(err).NotTo(HaveOccurred())
framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{})
@@ -149,10 +171,10 @@ var _ = SIGDescribe("Services", func() {
valid/accessible endpoints (different port number for pods).
*/
framework.ConformanceIt("should serve multiport endpoints from pods ", func() {
// TODO: use the ServiceTestJig here
// repacking functionality is intentionally not tested here - it's better to test it in an integration test.
serviceName := "multi-endpoint-test"
ns := f.Namespace.Name
jig := framework.NewServiceTestJig(cs, serviceName)
defer func() {
err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
@@ -165,27 +187,19 @@ var _ = SIGDescribe("Services", func() {
svc2port := "svc2"
By("creating service " + serviceName + " in namespace " + ns)
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
ports := []v1.ServicePort{
{
Name: "portname1",
Port: 80,
TargetPort: intstr.FromString(svc1port),
},
Spec: v1.ServiceSpec{
Selector: labels,
Ports: []v1.ServicePort{
{
Name: "portname1",
Port: 80,
TargetPort: intstr.FromString(svc1port),
},
{
Name: "portname2",
Port: 81,
TargetPort: intstr.FromString(svc2port),
},
},
{
Name: "portname2",
Port: 81,
TargetPort: intstr.FromString(svc2port),
},
}
_, err := cs.CoreV1().Services(ns).Create(service)
_, err := jig.CreateServiceWithServicePort(labels, ns, ports)
Expect(err).NotTo(HaveOccurred())
port1 := 100
port2 := 101
@@ -301,13 +315,13 @@ var _ = SIGDescribe("Services", func() {
framework.SkipUnlessSSHKeyPresent()
ns := f.Namespace.Name
numPods, servicePort := 3, 80
numPods, servicePort := 3, defaultServeHostnameServicePort
By("creating service1 in namespace " + ns)
podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, ns, "service1", servicePort, numPods)
podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService("service1"), ns, numPods)
Expect(err).NotTo(HaveOccurred())
By("creating service2 in namespace " + ns)
podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, ns, "service2", servicePort, numPods)
podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService("service2"), ns, numPods)
Expect(err).NotTo(HaveOccurred())
hosts, err := framework.NodeSSHHosts(cs)
@ -325,7 +339,7 @@ var _ = SIGDescribe("Services", func() {
// Stop service 1 and make sure it is gone.
By("stopping service1")
framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, ns, "service1"))
framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service1"))
By("verifying service1 is not up")
framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svc1IP, servicePort))
@ -334,7 +348,7 @@ var _ = SIGDescribe("Services", func() {
// Start another service and verify both are up.
By("creating service3 in namespace " + ns)
podNames3, svc3IP, err := framework.StartServeHostnameService(cs, internalClientset, ns, "service3", servicePort, numPods)
podNames3, svc3IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService("service3"), ns, numPods)
Expect(err).NotTo(HaveOccurred())
if svc2IP == svc3IP {
@ -353,21 +367,21 @@ var _ = SIGDescribe("Services", func() {
framework.SkipUnlessProviderIs("gce", "gke")
ns := f.Namespace.Name
numPods, servicePort := 3, 80
numPods, servicePort := 3, defaultServeHostnameServicePort
svc1 := "service1"
svc2 := "service2"
defer func() {
framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, ns, svc1))
framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc1))
}()
podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, ns, svc1, servicePort, numPods)
podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService(svc1), ns, numPods)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, ns, svc2))
framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc2))
}()
podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, ns, svc2, servicePort, numPods)
podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService(svc2), ns, numPods)
Expect(err).NotTo(HaveOccurred())
if svc1IP == svc2IP {
@ -412,9 +426,9 @@ var _ = SIGDescribe("Services", func() {
numPods, servicePort := 3, 80
defer func() {
framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, ns, "service1"))
framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service1"))
}()
podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, ns, "service1", servicePort, numPods)
podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService("service1"), ns, numPods)
Expect(err).NotTo(HaveOccurred())
hosts, err := framework.NodeSSHHosts(cs)
@ -428,7 +442,7 @@ var _ = SIGDescribe("Services", func() {
// Restart apiserver
By("Restarting apiserver")
if err := framework.RestartApiserver(cs.Discovery()); err != nil {
if err := framework.RestartApiserver(cs); err != nil {
framework.Failf("error restarting apiserver: %v", err)
}
By("Waiting for apiserver to come up by polling /healthz")
@ -439,9 +453,9 @@ var _ = SIGDescribe("Services", func() {
// Create a new service and check if it's not reusing IP.
defer func() {
framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, ns, "service2"))
framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service2"))
}()
podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, ns, "service2", servicePort, numPods)
podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService("service2"), ns, numPods)
Expect(err).NotTo(HaveOccurred())
if svc1IP == svc2IP {
@ -485,7 +499,7 @@ var _ = SIGDescribe("Services", func() {
}
})
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #52495 is fixed.
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
It("should be able to change the type and ports of a service [Slow] [DisabledForLargeClusters]", func() {
// requires cloud load-balancer support
framework.SkipUnlessProviderIs("gce", "gke", "aws")
@ -1276,7 +1290,7 @@ var _ = SIGDescribe("Services", func() {
}
By("Scaling down replication controller to zero")
framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false)
framework.ScaleRC(f.ClientSet, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false)
By("Update service to not tolerate unready services")
_, err = framework.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) {
@ -1412,7 +1426,7 @@ var _ = SIGDescribe("Services", func() {
framework.CheckReachabilityFromPod(true, normalReachabilityTimeout, namespace, dropPodName, svcIP)
})
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #52495 is fixed.
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
It("should be able to create an internal type load balancer [Slow] [DisabledForLargeClusters]", func() {
framework.SkipUnlessProviderIs("azure", "gke", "gce")
@ -1529,9 +1543,65 @@ var _ = SIGDescribe("Services", func() {
By("switching to ClusterIP type to destroy loadbalancer")
jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, createTimeout)
})
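// The session affinity tests below all start from getServeHostnameService and only vary
// the service type (ClusterIP, NodePort, LoadBalancer) and, for load balancers, the
// ExternalTrafficPolicy. The shared test logic lives in execAffinityTestForNonLBService
// and execAffinityTestForLBService, defined later in this file.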
It("should have session affinity work for service with type clusterIP", func() {
svc := getServeHostnameService("service")
svc.Spec.Type = v1.ServiceTypeClusterIP
execAffinityTestForNonLBService(f, cs, svc, false)
})
It("should be able to switch session affinity for service with type clusterIP", func() {
svc := getServeHostnameService("service")
svc.Spec.Type = v1.ServiceTypeClusterIP
execAffinityTestForNonLBService(f, cs, svc, true)
})
It("should have session affinity work for NodePort service", func() {
svc := getServeHostnameService("service")
svc.Spec.Type = v1.ServiceTypeNodePort
execAffinityTestForNonLBService(f, cs, svc, false)
})
It("should be able to switch session affinity for NodePort service", func() {
svc := getServeHostnameService("service")
svc.Spec.Type = v1.ServiceTypeNodePort
execAffinityTestForNonLBService(f, cs, svc, true)
})
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
It("should have session affinity work for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters]", func() {
svc := getServeHostnameService("service")
svc.Spec.Type = v1.ServiceTypeLoadBalancer
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
execAffinityTestForLBService(f, cs, svc, false)
})
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
It("should be able to switch session affinity for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters]", func() {
svc := getServeHostnameService("service")
svc.Spec.Type = v1.ServiceTypeLoadBalancer
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
execAffinityTestForLBService(f, cs, svc, true)
})
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
It("should have session affinity work for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", func() {
svc := getServeHostnameService("service")
svc.Spec.Type = v1.ServiceTypeLoadBalancer
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
execAffinityTestForLBService(f, cs, svc, false)
})
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
It("should be able to switch session affinity for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", func() {
svc := getServeHostnameService("service")
svc.Spec.Type = v1.ServiceTypeLoadBalancer
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
execAffinityTestForLBService(f, cs, svc, true)
})
})
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #52495 is fixed.
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
f := framework.NewDefaultFramework("esipp")
loadBalancerCreateTimeout := framework.LoadBalancerCreateTimeoutDefault
@ -1555,7 +1625,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
}
for _, lb := range serviceLBNames {
framework.Logf("cleaning gce resource for %s", lb)
framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Zone)
framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
}
// Reset serviceLBNames.
serviceLBNames = []string{}
@ -1683,7 +1753,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
framework.Logf("Health checking %s, http://%s%s, expectedSuccess %v", nodes.Items[n].Name, ipPort, path, expectedSuccess)
Expect(jig.TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, framework.KubeProxyEndpointLagTimeout, expectedSuccess, threshold)).NotTo(HaveOccurred())
}
framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, namespace, serviceName))
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, serviceName))
}
})
@ -1858,7 +1928,7 @@ func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeNam
timeout := 2 * time.Minute
framework.Logf("Waiting up to %v wget %s", timeout, serviceIPPort)
cmd := fmt.Sprintf(`wget -T 30 -qO- %s | grep client_address`, serviceIPPort)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(2) {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
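// Each iteration runs wget from the exec pod and retries until the response contains
// client_address or the timeout expires.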
stdout, err = framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
if err != nil {
framework.Logf("got err: %v, retry until timeout", err)
@ -1883,3 +1953,95 @@ func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeNam
}
return execPod.Status.PodIP, outputs[1]
}
// execAffinityTestForNonLBService is a helper function that wraps the logic of
// the affinity test for non-load-balancer services. Session affinity is
// enabled when the service is created. If the isTransitionTest parameter is
// true, session affinity is switched off and back on, and the test verifies
// that the service converges to a stable affinity state.
func execAffinityTestForNonLBService(f *framework.Framework, cs clientset.Interface, svc *v1.Service, isTransitionTest bool) {
ns := f.Namespace.Name
numPods, servicePort, serviceName := 3, defaultServeHostnameServicePort, svc.ObjectMeta.Name
By("creating service in namespace " + ns)
serviceType := svc.Spec.Type
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
_, _, err := framework.StartServeHostnameService(cs, f.InternalClientset, svc, ns, numPods)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.StopServeHostnameService(cs, ns, serviceName)
}()
jig := framework.NewServiceTestJig(cs, serviceName)
svc, err = jig.Client.CoreV1().Services(ns).Get(serviceName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
var svcIp string
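// For NodePort services, target a node's internal IP and the allocated node port;
// otherwise hit the ClusterIP on the service port directly.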
if serviceType == v1.ServiceTypeNodePort {
nodes := framework.GetReadySchedulableNodesOrDie(cs)
addrs := framework.CollectAddresses(nodes, v1.NodeInternalIP)
Expect(len(addrs)).To(BeNumerically(">", 0), "Failed to get Node internal IP")
svcIp = addrs[0]
servicePort = int(svc.Spec.Ports[0].NodePort)
} else {
svcIp = svc.Spec.ClusterIP
}
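// Run the affinity probes from an exec pod inside the cluster so that requests come
// from a single, stable client IP.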
execPodName := framework.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil)
defer func() {
framework.Logf("Cleaning up the exec pod")
err := cs.CoreV1().Pods(ns).Delete(execPodName, nil)
Expect(err).NotTo(HaveOccurred())
}()
execPod, err := cs.CoreV1().Pods(ns).Get(execPodName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
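// The trailing boolean arguments to framework.CheckAffinity are read here as
// (affinity expected to hold, transition test); this is an interpretation based on how
// the calls below are made, not a documented contract.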
if !isTransitionTest {
Expect(framework.CheckAffinity(jig, execPod, svcIp, servicePort, true, false)).To(BeTrue())
}
if isTransitionTest {
svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityNone
})
Expect(framework.CheckAffinity(jig, execPod, svcIp, servicePort, false, true)).To(BeTrue())
svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
})
Expect(framework.CheckAffinity(jig, execPod, svcIp, servicePort, true, true)).To(BeTrue())
}
}
// execAffinityTestForLBService is a helper function that wraps the logic of
// the affinity test for load-balancer services, similar to
// execAffinityTestForNonLBService.
func execAffinityTestForLBService(f *framework.Framework, cs clientset.Interface, svc *v1.Service, isTransitionTest bool) {
numPods, ns, serviceName := 3, f.Namespace.Name, svc.ObjectMeta.Name
By("creating service in namespace " + ns)
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
_, _, err := framework.StartServeHostnameService(cs, f.InternalClientset, svc, ns, numPods)
Expect(err).NotTo(HaveOccurred())
jig := framework.NewServiceTestJig(cs, serviceName)
By("waiting for loadbalancer for service " + ns + "/" + serviceName)
svc = jig.WaitForLoadBalancerOrFail(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault)
jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
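// Tear down the serve-hostname pods and the cloud load-balancer resources created for
// this service when the test finishes.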
defer func() {
framework.StopServeHostnameService(cs, ns, serviceName)
lb := cloudprovider.GetLoadBalancerName(svc)
framework.Logf("cleaning gce resource for %s", lb)
framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
}()
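// Probe the external ingress IP on the service port; no exec pod is passed here, so the
// requests presumably originate from the e2e test client itself.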
ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
port := int(svc.Spec.Ports[0].Port)
if !isTransitionTest {
Expect(framework.CheckAffinity(jig, nil, ingressIP, port, true, false)).To(BeTrue())
}
if isTransitionTest {
svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityNone
})
Expect(framework.CheckAffinity(jig, nil, ingressIP, port, false, true)).To(BeTrue())
svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
})
Expect(framework.CheckAffinity(jig, nil, ingressIP, port, true, true)).To(BeTrue())
}
}

View File

@ -32,6 +32,7 @@ import (
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
)
@ -127,7 +128,7 @@ func runServiceLatencies(f *framework.Framework, inParallel, total int) (output
cfg := testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Image: framework.GetPauseImageName(f.ClientSet),
Image: imageutils.GetPauseImageName(),
Name: "svc-latency-rc",
Namespace: f.Namespace.Name,
Replicas: 1,