Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 18:43:34 +00:00)

Commit: vendor updates

vendor/k8s.io/kubernetes/test/e2e/network/BUILD (generated, vendored): 10 lines changed

@@ -16,6 +16,7 @@ go_library(
"firewall.go",
"framework.go",
"ingress.go",
"ingress_scale.go",
"kube_proxy.go",
"network_policy.go",
"network_tiers.go",
@@ -35,18 +36,22 @@ go_library(
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/cloudprovider/providers/gce/cloud:go_default_library",
"//pkg/controller/endpoint:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/master/ports:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/e2e/network/scale:go_default_library",
"//test/images/net/nat:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/google.golang.org/api/compute/v0.alpha:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/networking/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
@@ -78,6 +83,9 @@ filegroup(

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
srcs = [
":package-srcs",
"//test/e2e/network/scale:all-srcs",
],
tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/test/e2e/network/dns.go (generated, vendored): 351 lines changed
@@ -17,255 +17,22 @@ limitations under the License.
|
||||
package network
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const dnsTestPodHostName = "dns-querier-1"
|
||||
const dnsTestServiceName = "dns-test-service"
|
||||
|
||||
func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd string) *v1.Pod {
|
||||
dnsPod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "dns-test-" + string(uuid.NewUUID()),
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "results",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
// TODO: Consider scraping logs instead of running a webserver.
|
||||
{
|
||||
Name: "webserver",
|
||||
Image: imageutils.GetE2EImage(imageutils.TestWebserver),
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
Name: "http",
|
||||
ContainerPort: 80,
|
||||
},
|
||||
},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "results",
|
||||
MountPath: "/results",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "querier",
|
||||
Image: imageutils.GetE2EImage(imageutils.Dnsutils),
|
||||
Command: []string{"sh", "-c", wheezyProbeCmd},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "results",
|
||||
MountPath: "/results",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "jessie-querier",
|
||||
Image: imageutils.GetE2EImage(imageutils.JessieDnsutils),
|
||||
Command: []string{"sh", "-c", jessieProbeCmd},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "results",
|
||||
MountPath: "/results",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
dnsPod.Spec.Hostname = dnsTestPodHostName
|
||||
dnsPod.Spec.Subdomain = dnsTestServiceName
|
||||
|
||||
return dnsPod
|
||||
}
|
||||
|
||||
func createProbeCommand(namesToResolve []string, hostEntries []string, ptrLookupIP string, fileNamePrefix, namespace string) (string, []string) {
|
||||
fileNames := make([]string, 0, len(namesToResolve)*2)
|
||||
probeCmd := "for i in `seq 1 600`; do "
|
||||
for _, name := range namesToResolve {
|
||||
// Resolve by TCP and UDP DNS. Use $$(...) because $(...) is
|
||||
// expanded by kubernetes (though this won't expand so should
|
||||
// remain a literal, safe > sorry).
|
||||
lookup := "A"
|
||||
if strings.HasPrefix(name, "_") {
|
||||
lookup = "SRV"
|
||||
}
|
||||
fileName := fmt.Sprintf("%s_udp@%s", fileNamePrefix, name)
|
||||
fileNames = append(fileNames, fileName)
|
||||
probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search %s %s)" && echo OK > /results/%s;`, name, lookup, fileName)
|
||||
fileName = fmt.Sprintf("%s_tcp@%s", fileNamePrefix, name)
|
||||
fileNames = append(fileNames, fileName)
|
||||
probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search %s %s)" && echo OK > /results/%s;`, name, lookup, fileName)
|
||||
}
|
||||
|
||||
for _, name := range hostEntries {
|
||||
fileName := fmt.Sprintf("%s_hosts@%s", fileNamePrefix, name)
|
||||
fileNames = append(fileNames, fileName)
|
||||
probeCmd += fmt.Sprintf(`test -n "$$(getent hosts %s)" && echo OK > /results/%s;`, name, fileName)
|
||||
}
|
||||
|
||||
podARecByUDPFileName := fmt.Sprintf("%s_udp@PodARecord", fileNamePrefix)
|
||||
podARecByTCPFileName := fmt.Sprintf("%s_tcp@PodARecord", fileNamePrefix)
|
||||
probeCmd += fmt.Sprintf(`podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".%s.pod.cluster.local"}');`, namespace)
|
||||
probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search $${podARec} A)" && echo OK > /results/%s;`, podARecByUDPFileName)
|
||||
probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search $${podARec} A)" && echo OK > /results/%s;`, podARecByTCPFileName)
|
||||
fileNames = append(fileNames, podARecByUDPFileName)
|
||||
fileNames = append(fileNames, podARecByTCPFileName)
|
||||
|
||||
if len(ptrLookupIP) > 0 {
|
||||
ptrLookup := fmt.Sprintf("%s.in-addr.arpa.", strings.Join(reverseArray(strings.Split(ptrLookupIP, ".")), "."))
|
||||
ptrRecByUDPFileName := fmt.Sprintf("%s_udp@PTR", ptrLookupIP)
|
||||
ptrRecByTCPFileName := fmt.Sprintf("%s_tcp@PTR", ptrLookupIP)
|
||||
probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search %s PTR)" && echo OK > /results/%s;`, ptrLookup, ptrRecByUDPFileName)
|
||||
probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search %s PTR)" && echo OK > /results/%s;`, ptrLookup, ptrRecByTCPFileName)
|
||||
fileNames = append(fileNames, ptrRecByUDPFileName)
|
||||
fileNames = append(fileNames, ptrRecByTCPFileName)
|
||||
}
|
||||
|
||||
probeCmd += "sleep 1; done"
|
||||
return probeCmd, fileNames
|
||||
}
|
||||
|
||||
// createTargetedProbeCommand returns a command line that performs a DNS lookup for a specific record type
|
||||
func createTargetedProbeCommand(nameToResolve string, lookup string, fileNamePrefix string) (string, string) {
|
||||
fileName := fmt.Sprintf("%s_udp@%s", fileNamePrefix, nameToResolve)
|
||||
probeCmd := fmt.Sprintf("dig +short +tries=12 +norecurse %s %s > /results/%s", nameToResolve, lookup, fileName)
|
||||
return probeCmd, fileName
|
||||
}
|
||||
|
||||
func assertFilesExist(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) {
|
||||
assertFilesContain(fileNames, fileDir, pod, client, false, "")
|
||||
}
|
||||
|
||||
func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface, check bool, expected string) {
|
||||
var failed []string
|
||||
|
||||
framework.ExpectNoError(wait.Poll(time.Second*10, time.Second*600, func() (bool, error) {
|
||||
failed = []string{}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
|
||||
defer cancel()
|
||||
|
||||
for _, fileName := range fileNames {
|
||||
contents, err := client.CoreV1().RESTClient().Get().
|
||||
Context(ctx).
|
||||
Namespace(pod.Namespace).
|
||||
Resource("pods").
|
||||
SubResource("proxy").
|
||||
Name(pod.Name).
|
||||
Suffix(fileDir, fileName).
|
||||
Do().Raw()
|
||||
|
||||
if err != nil {
|
||||
if ctx.Err() != nil {
|
||||
framework.Failf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
|
||||
} else {
|
||||
framework.Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
|
||||
}
|
||||
failed = append(failed, fileName)
|
||||
} else if check && strings.TrimSpace(string(contents)) != expected {
|
||||
framework.Logf("File %s from pod %s contains '%s' instead of '%s'", fileName, pod.Name, string(contents), expected)
|
||||
failed = append(failed, fileName)
|
||||
}
|
||||
}
|
||||
if len(failed) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("Lookups using %s failed for: %v\n", pod.Name, failed)
|
||||
return false, nil
|
||||
}))
|
||||
Expect(len(failed)).To(Equal(0))
|
||||
}
|
||||
|
||||
func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) {
|
||||
By("submitting the pod to kubernetes")
|
||||
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
|
||||
defer func() {
|
||||
By("deleting the pod")
|
||||
defer GinkgoRecover()
|
||||
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
|
||||
}()
|
||||
if _, err := podClient.Create(pod); err != nil {
|
||||
framework.Failf("Failed to create %s pod: %v", pod.Name, err)
|
||||
}
|
||||
|
||||
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
|
||||
|
||||
By("retrieving the pod")
|
||||
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("Failed to get pod %s: %v", pod.Name, err)
|
||||
}
|
||||
// Try to find results for each expected name.
|
||||
By("looking for the results for each expected name from probers")
|
||||
assertFilesExist(fileNames, "results", pod, f.ClientSet)
|
||||
|
||||
// TODO: probe from the host, too.
|
||||
|
||||
framework.Logf("DNS probes using %s succeeded\n", pod.Name)
|
||||
}
|
||||
|
||||
func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {
|
||||
By("submitting the pod to kubernetes")
|
||||
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
|
||||
defer func() {
|
||||
By("deleting the pod")
|
||||
defer GinkgoRecover()
|
||||
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
|
||||
}()
|
||||
if _, err := podClient.Create(pod); err != nil {
|
||||
framework.Failf("Failed to create %s pod: %v", pod.Name, err)
|
||||
}
|
||||
|
||||
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
|
||||
|
||||
By("retrieving the pod")
|
||||
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("Failed to get pod %s: %v", pod.Name, err)
|
||||
}
|
||||
// Try to find the expected value for each expected name.
|
||||
By("looking for the results for each expected name from probers")
|
||||
assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value)
|
||||
|
||||
framework.Logf("DNS probes using %s succeeded\n", pod.Name)
|
||||
}
|
||||
|
||||
func reverseArray(arr []string) []string {
|
||||
for i := 0; i < len(arr)/2; i++ {
|
||||
j := len(arr) - i - 1
|
||||
arr[i], arr[j] = arr[j], arr[i]
|
||||
}
|
||||
return arr
|
||||
}
|
||||
|
||||
var _ = SIGDescribe("DNS", func() {
|
||||
f := framework.NewDefaultFramework("dns")
|
||||
|
||||
@@ -295,7 +62,7 @@ var _ = SIGDescribe("DNS", func() {
|
||||
|
||||
// Run a pod which probes DNS and exposes the results by HTTP.
|
||||
By("creating a pod to probe DNS")
|
||||
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
|
||||
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
|
||||
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
|
||||
})
|
||||
|
||||
@@ -345,7 +112,7 @@ var _ = SIGDescribe("DNS", func() {
|
||||
|
||||
// Run a pod which probes DNS and exposes the results by HTTP.
|
||||
By("creating a pod to probe DNS")
|
||||
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
|
||||
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
|
||||
pod.ObjectMeta.Labels = testServiceSelector
|
||||
|
||||
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
|
||||
@@ -378,7 +145,7 @@ var _ = SIGDescribe("DNS", func() {
|
||||
|
||||
// Run a pod which probes DNS and exposes the results by HTTP.
|
||||
By("creating a pod to probe DNS")
|
||||
pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
|
||||
pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
|
||||
pod1.ObjectMeta.Labels = testServiceSelector
|
||||
pod1.Spec.Hostname = podHostname
|
||||
pod1.Spec.Subdomain = serviceName
|
||||
@@ -407,7 +174,7 @@ var _ = SIGDescribe("DNS", func() {
|
||||
|
||||
// Run a pod which probes DNS and exposes the results by HTTP.
|
||||
By("creating a pod to probe DNS")
|
||||
pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
|
||||
pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
|
||||
|
||||
validateTargetedProbeOutput(f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.")
|
||||
|
||||
@@ -424,7 +191,7 @@ var _ = SIGDescribe("DNS", func() {
|
||||
|
||||
// Run a pod which probes DNS and exposes the results by HTTP.
|
||||
By("creating a second pod to probe DNS")
|
||||
pod2 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
|
||||
pod2 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
|
||||
|
||||
validateTargetedProbeOutput(f, pod2, []string{wheezyFileName, jessieFileName}, "bar.example.com.")
|
||||
|
||||
@@ -444,11 +211,111 @@ var _ = SIGDescribe("DNS", func() {
|
||||
|
||||
// Run a pod which probes DNS and exposes the results by HTTP.
|
||||
By("creating a third pod to probe DNS")
|
||||
pod3 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
|
||||
pod3 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
|
||||
|
||||
svc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Get(externalNameService.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
validateTargetedProbeOutput(f, pod3, []string{wheezyFileName, jessieFileName}, svc.Spec.ClusterIP)
|
||||
})
|
||||
|
||||
It("should support configurable pod resolv.conf", func() {
|
||||
By("Preparing a test DNS service with injected DNS names...")
|
||||
testInjectedIP := "1.1.1.1"
|
||||
testDNSNameShort := "notexistname"
|
||||
testSearchPath := "resolv.conf.local"
|
||||
testDNSNameFull := fmt.Sprintf("%s.%s", testDNSNameShort, testSearchPath)
|
||||
|
||||
testServerPod := generateDNSServerPod(map[string]string{
|
||||
testDNSNameFull: testInjectedIP,
|
||||
})
|
||||
testServerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testServerPod)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s", testServerPod.Name)
|
||||
framework.Logf("Created pod %v", testServerPod)
|
||||
defer func() {
|
||||
framework.Logf("Deleting pod %s...", testServerPod.Name)
|
||||
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
|
||||
framework.Failf("Failed to delete pod %s: %v", testServerPod.Name, err)
|
||||
}
|
||||
}()
|
||||
Expect(f.WaitForPodRunning(testServerPod.Name)).NotTo(HaveOccurred(), "failed to wait for pod %s to be running", testServerPod.Name)
|
||||
|
||||
// Retrieve server pod IP.
|
||||
testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(testServerPod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to get pod %v", testServerPod.Name)
|
||||
testServerIP := testServerPod.Status.PodIP
|
||||
framework.Logf("testServerIP is %s", testServerIP)
|
||||
|
||||
By("Creating a pod with dnsPolicy=None and customized dnsConfig...")
|
||||
testUtilsPod := generateDNSUtilsPod()
|
||||
testUtilsPod.Spec.DNSPolicy = v1.DNSNone
|
||||
testNdotsValue := "2"
|
||||
testUtilsPod.Spec.DNSConfig = &v1.PodDNSConfig{
|
||||
Nameservers: []string{testServerIP},
|
||||
Searches: []string{testSearchPath},
|
||||
Options: []v1.PodDNSConfigOption{
|
||||
{
|
||||
Name: "ndots",
|
||||
Value: &testNdotsValue,
|
||||
},
|
||||
},
|
||||
}
|
||||
testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testUtilsPod)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s", testUtilsPod.Name)
|
||||
framework.Logf("Created pod %v", testUtilsPod)
|
||||
defer func() {
|
||||
framework.Logf("Deleting pod %s...", testUtilsPod.Name)
|
||||
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil {
|
||||
framework.Failf("Failed to delete pod %s: %v", testUtilsPod.Name, err)
|
||||
}
|
||||
}()
|
||||
Expect(f.WaitForPodRunning(testUtilsPod.Name)).NotTo(HaveOccurred(), "failed to wait for pod %s to be running", testUtilsPod.Name)
|
||||
|
||||
By("Verifying customized DNS option is configured on pod...")
|
||||
// TODO: Figure out a better way other than checking the actual resolv.conf file.
|
||||
cmd := []string{"cat", "/etc/resolv.conf"}
|
||||
stdout, stderr, err := f.ExecWithOptions(framework.ExecOptions{
|
||||
Command: cmd,
|
||||
Namespace: f.Namespace.Name,
|
||||
PodName: testUtilsPod.Name,
|
||||
ContainerName: "util",
|
||||
CaptureStdout: true,
|
||||
CaptureStderr: true,
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to examine resolv,conf file on pod, stdout: %v, stderr: %v, err: %v", stdout, stderr, err)
|
||||
if !strings.Contains(stdout, "ndots:2") {
|
||||
framework.Failf("customized DNS options not found in resolv.conf, got: %s", stdout)
|
||||
}
|
||||
|
||||
By("Verifying customized name server and search path are working...")
|
||||
// Do dig on not-exist-dns-name and see if the injected DNS record is returned.
|
||||
// This verifies both:
|
||||
// - Custom search path is appended.
|
||||
// - DNS query is sent to the specified server.
|
||||
cmd = []string{"/usr/bin/dig", "+short", "+search", testDNSNameShort}
|
||||
digFunc := func() (bool, error) {
|
||||
stdout, stderr, err := f.ExecWithOptions(framework.ExecOptions{
|
||||
Command: cmd,
|
||||
Namespace: f.Namespace.Name,
|
||||
PodName: testUtilsPod.Name,
|
||||
ContainerName: "util",
|
||||
CaptureStdout: true,
|
||||
CaptureStderr: true,
|
||||
})
|
||||
if err != nil {
|
||||
framework.Logf("Failed to execute dig command, stdout:%v, stderr: %v, err: %v", stdout, stderr, err)
|
||||
return false, nil
|
||||
}
|
||||
res := strings.Split(stdout, "\n")
|
||||
if len(res) != 1 || res[0] != testInjectedIP {
|
||||
framework.Logf("Expect command `%v` to return %s, got: %v", cmd, testInjectedIP, res)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
err = wait.PollImmediate(5*time.Second, 3*time.Minute, digFunc)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to verify customized name server and search path")
|
||||
|
||||
// TODO: Add more test cases for other DNSPolicies.
|
||||
})
|
||||
})
|
||||
|
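Editor's note on the dns.go hunks above: the pod-building and probe helpers (createDNSPod, createProbeCommand, assertFilesContain, validateDNSResults and friends) move out of this file into dns_common.go, and createDNSPod gains explicit hostname and subdomain parameters. The fragment below is only a sketch of how a call site reads after the change, assembled from the hunks shown here; it is not an excerpt from the vendored file, and the surrounding test setup (f, namesToResolve) is assumed.

// Sketch only: call pattern after the refactor (assumes a framework.Framework
// value f and a namesToResolve slice from the surrounding test).
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, "", "wheezy", f.Namespace.Name)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, "", "jessie", f.Namespace.Name)

// Hostname and subdomain are now arguments instead of being hard-coded
// inside createDNSPod.
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))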
vendor/k8s.io/kubernetes/test/e2e/network/dns_common.go (generated, vendored): 268 lines changed
@@ -17,6 +17,7 @@ limitations under the License.
|
||||
package network
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -27,8 +28,10 @@ import (
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
@@ -229,20 +232,19 @@ func (t *dnsTestCommon) deleteUtilPod() {
|
||||
}
|
||||
}
|
||||
|
||||
func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) {
|
||||
t.dnsServerPod = &v1.Pod{
|
||||
func generateDNSServerPod(aRecords map[string]string) *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: t.f.Namespace.Name,
|
||||
GenerateName: "e2e-dns-configmap-dns-server-",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dns",
|
||||
Image: "gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.14.5",
|
||||
Image: imageutils.GetE2EImage(imageutils.DNSMasq),
|
||||
Command: []string{
|
||||
"/usr/sbin/dnsmasq",
|
||||
"-u", "root",
|
||||
@@ -257,10 +259,15 @@ func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) {
|
||||
}
|
||||
|
||||
for name, ip := range aRecords {
|
||||
t.dnsServerPod.Spec.Containers[0].Command = append(
|
||||
t.dnsServerPod.Spec.Containers[0].Command,
|
||||
pod.Spec.Containers[0].Command = append(
|
||||
pod.Spec.Containers[0].Command,
|
||||
fmt.Sprintf("-A/%v/%v", name, ip))
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) {
|
||||
t.dnsServerPod = generateDNSServerPod(aRecords)
|
||||
|
||||
var err error
|
||||
t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.dnsServerPod)
|
||||
@@ -280,3 +287,252 @@ func (t *dnsTestCommon) deleteDNSServerPod() {
|
||||
t.utilPod.Namespace, t.dnsServerPod.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd, podHostName, serviceName string) *v1.Pod {
|
||||
dnsPod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "dns-test-" + string(uuid.NewUUID()),
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "results",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
// TODO: Consider scraping logs instead of running a webserver.
|
||||
{
|
||||
Name: "webserver",
|
||||
Image: imageutils.GetE2EImage(imageutils.TestWebserver),
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
Name: "http",
|
||||
ContainerPort: 80,
|
||||
},
|
||||
},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "results",
|
||||
MountPath: "/results",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "querier",
|
||||
Image: imageutils.GetE2EImage(imageutils.Dnsutils),
|
||||
Command: []string{"sh", "-c", wheezyProbeCmd},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "results",
|
||||
MountPath: "/results",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "jessie-querier",
|
||||
Image: imageutils.GetE2EImage(imageutils.JessieDnsutils),
|
||||
Command: []string{"sh", "-c", jessieProbeCmd},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "results",
|
||||
MountPath: "/results",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
dnsPod.Spec.Hostname = podHostName
|
||||
dnsPod.Spec.Subdomain = serviceName
|
||||
|
||||
return dnsPod
|
||||
}
|
||||
|
||||
func createProbeCommand(namesToResolve []string, hostEntries []string, ptrLookupIP string, fileNamePrefix, namespace string) (string, []string) {
|
||||
fileNames := make([]string, 0, len(namesToResolve)*2)
|
||||
probeCmd := "for i in `seq 1 600`; do "
|
||||
for _, name := range namesToResolve {
|
||||
// Resolve by TCP and UDP DNS. Use $$(...) because $(...) is
|
||||
// expanded by kubernetes (though this won't expand so should
|
||||
// remain a literal, safe > sorry).
|
||||
lookup := "A"
|
||||
if strings.HasPrefix(name, "_") {
|
||||
lookup = "SRV"
|
||||
}
|
||||
fileName := fmt.Sprintf("%s_udp@%s", fileNamePrefix, name)
|
||||
fileNames = append(fileNames, fileName)
|
||||
probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search %s %s)" && echo OK > /results/%s;`, name, lookup, fileName)
|
||||
fileName = fmt.Sprintf("%s_tcp@%s", fileNamePrefix, name)
|
||||
fileNames = append(fileNames, fileName)
|
||||
probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search %s %s)" && echo OK > /results/%s;`, name, lookup, fileName)
|
||||
}
|
||||
|
||||
for _, name := range hostEntries {
|
||||
fileName := fmt.Sprintf("%s_hosts@%s", fileNamePrefix, name)
|
||||
fileNames = append(fileNames, fileName)
|
||||
probeCmd += fmt.Sprintf(`test -n "$$(getent hosts %s)" && echo OK > /results/%s;`, name, fileName)
|
||||
}
|
||||
|
||||
podARecByUDPFileName := fmt.Sprintf("%s_udp@PodARecord", fileNamePrefix)
|
||||
podARecByTCPFileName := fmt.Sprintf("%s_tcp@PodARecord", fileNamePrefix)
|
||||
probeCmd += fmt.Sprintf(`podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".%s.pod.cluster.local"}');`, namespace)
|
||||
probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search $${podARec} A)" && echo OK > /results/%s;`, podARecByUDPFileName)
|
||||
probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search $${podARec} A)" && echo OK > /results/%s;`, podARecByTCPFileName)
|
||||
fileNames = append(fileNames, podARecByUDPFileName)
|
||||
fileNames = append(fileNames, podARecByTCPFileName)
|
||||
|
||||
if len(ptrLookupIP) > 0 {
|
||||
ptrLookup := fmt.Sprintf("%s.in-addr.arpa.", strings.Join(reverseArray(strings.Split(ptrLookupIP, ".")), "."))
|
||||
ptrRecByUDPFileName := fmt.Sprintf("%s_udp@PTR", ptrLookupIP)
|
||||
ptrRecByTCPFileName := fmt.Sprintf("%s_tcp@PTR", ptrLookupIP)
|
||||
probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search %s PTR)" && echo OK > /results/%s;`, ptrLookup, ptrRecByUDPFileName)
|
||||
probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search %s PTR)" && echo OK > /results/%s;`, ptrLookup, ptrRecByTCPFileName)
|
||||
fileNames = append(fileNames, ptrRecByUDPFileName)
|
||||
fileNames = append(fileNames, ptrRecByTCPFileName)
|
||||
}
|
||||
|
||||
probeCmd += "sleep 1; done"
|
||||
return probeCmd, fileNames
|
||||
}
|
||||
|
||||
// createTargetedProbeCommand returns a command line that performs a DNS lookup for a specific record type
|
||||
func createTargetedProbeCommand(nameToResolve string, lookup string, fileNamePrefix string) (string, string) {
|
||||
fileName := fmt.Sprintf("%s_udp@%s", fileNamePrefix, nameToResolve)
|
||||
probeCmd := fmt.Sprintf("dig +short +tries=12 +norecurse %s %s > /results/%s", nameToResolve, lookup, fileName)
|
||||
return probeCmd, fileName
|
||||
}
|
||||
|
||||
func assertFilesExist(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) {
|
||||
assertFilesContain(fileNames, fileDir, pod, client, false, "")
|
||||
}
|
||||
|
||||
func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface, check bool, expected string) {
|
||||
var failed []string
|
||||
|
||||
framework.ExpectNoError(wait.Poll(time.Second*10, time.Second*600, func() (bool, error) {
|
||||
failed = []string{}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
|
||||
defer cancel()
|
||||
|
||||
for _, fileName := range fileNames {
|
||||
contents, err := client.CoreV1().RESTClient().Get().
|
||||
Context(ctx).
|
||||
Namespace(pod.Namespace).
|
||||
Resource("pods").
|
||||
SubResource("proxy").
|
||||
Name(pod.Name).
|
||||
Suffix(fileDir, fileName).
|
||||
Do().Raw()
|
||||
|
||||
if err != nil {
|
||||
if ctx.Err() != nil {
|
||||
framework.Failf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
|
||||
} else {
|
||||
framework.Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
|
||||
}
|
||||
failed = append(failed, fileName)
|
||||
} else if check && strings.TrimSpace(string(contents)) != expected {
|
||||
framework.Logf("File %s from pod %s contains '%s' instead of '%s'", fileName, pod.Name, string(contents), expected)
|
||||
failed = append(failed, fileName)
|
||||
}
|
||||
}
|
||||
if len(failed) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("Lookups using %s failed for: %v\n", pod.Name, failed)
|
||||
return false, nil
|
||||
}))
|
||||
Expect(len(failed)).To(Equal(0))
|
||||
}
|
||||
|
||||
func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) {
|
||||
By("submitting the pod to kubernetes")
|
||||
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
|
||||
defer func() {
|
||||
By("deleting the pod")
|
||||
defer GinkgoRecover()
|
||||
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
|
||||
}()
|
||||
if _, err := podClient.Create(pod); err != nil {
|
||||
framework.Failf("Failed to create %s pod: %v", pod.Name, err)
|
||||
}
|
||||
|
||||
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
|
||||
|
||||
By("retrieving the pod")
|
||||
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("Failed to get pod %s: %v", pod.Name, err)
|
||||
}
|
||||
// Try to find results for each expected name.
|
||||
By("looking for the results for each expected name from probers")
|
||||
assertFilesExist(fileNames, "results", pod, f.ClientSet)
|
||||
|
||||
// TODO: probe from the host, too.
|
||||
|
||||
framework.Logf("DNS probes using %s succeeded\n", pod.Name)
|
||||
}
|
||||
|
||||
func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {
|
||||
By("submitting the pod to kubernetes")
|
||||
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
|
||||
defer func() {
|
||||
By("deleting the pod")
|
||||
defer GinkgoRecover()
|
||||
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
|
||||
}()
|
||||
if _, err := podClient.Create(pod); err != nil {
|
||||
framework.Failf("Failed to create %s pod: %v", pod.Name, err)
|
||||
}
|
||||
|
||||
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
|
||||
|
||||
By("retrieving the pod")
|
||||
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("Failed to get pod %s: %v", pod.Name, err)
|
||||
}
|
||||
// Try to find the expected value for each expected name.
|
||||
By("looking for the results for each expected name from probers")
|
||||
assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value)
|
||||
|
||||
framework.Logf("DNS probes using %s succeeded\n", pod.Name)
|
||||
}
|
||||
|
||||
func reverseArray(arr []string) []string {
|
||||
for i := 0; i < len(arr)/2; i++ {
|
||||
j := len(arr) - i - 1
|
||||
arr[i], arr[j] = arr[j], arr[i]
|
||||
}
|
||||
return arr
|
||||
}
|
||||
|
||||
func generateDNSUtilsPod() *v1.Pod {
|
||||
return &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "e2e-dns-utils-",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "util",
|
||||
Image: imageutils.GetE2EImage(imageutils.Dnsutils),
|
||||
Command: []string{"sleep", "10000"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
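Editor's note on the dns_common.go hunks above: building the dnsmasq server pod is split out of createDNSServer into generateDNSServerPod, and the hard-coded gcr.io dnsmasq image is replaced by imageutils.GetE2EImage(imageutils.DNSMasq). A rough sketch of the resulting flow, based only on the hunks shown here; the injected record and the framework value f are placeholders, not part of the diff.

// Sketch only: generateDNSServerPod builds the pod object with one
// -A/<name>/<ip> dnsmasq flag per injected A record; createDNSServer (or the
// caller, as in the resolv.conf test) still submits it to the API server.
serverPod := generateDNSServerPod(map[string]string{
	"notexistname.resolv.conf.local": "1.1.1.1", // placeholder injected record
})
serverPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(serverPod)
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s", serverPod.Name)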
vendor/k8s.io/kubernetes/test/e2e/network/firewall.go (generated, vendored): 2 lines changed

@@ -170,7 +170,7 @@ var _ = SIGDescribe("Firewall rule", func() {
nodeAddrs := framework.NodeAddresses(nodes, v1.NodeExternalIP)
Expect(len(nodeAddrs)).NotTo(BeZero())
masterAddr := framework.GetMasterAddress(cs)
flag, _ := framework.TestNotReachableHTTPTimeout(masterAddr, ports.ControllerManagerPort, framework.FirewallTestTcpTimeout)
flag, _ := framework.TestNotReachableHTTPTimeout(masterAddr, ports.InsecureKubeControllerManagerPort, framework.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(masterAddr, ports.SchedulerPort, framework.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
vendor/k8s.io/kubernetes/test/e2e/network/ingress.go (generated, vendored): 305 lines changed
@@ -18,14 +18,22 @@ package network
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apiserver/pkg/authentication/serviceaccount"
|
||||
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
@@ -83,7 +91,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
Client: jig.Client,
|
||||
Cloud: framework.TestContext.CloudConfig,
|
||||
}
|
||||
gceController.Init()
|
||||
err := gceController.Init()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
// Platform specific cleanup
|
||||
@@ -99,7 +108,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
jig.TryDeleteIngress()
|
||||
|
||||
By("Cleaning up cloud resources")
|
||||
framework.CleanupGCEIngressController(gceController)
|
||||
Expect(gceController.CleanupGCEIngressController()).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should conform to Ingress spec", func() {
|
||||
@@ -118,8 +127,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", ns, ip))
|
||||
|
||||
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "static-ip"), ns, map[string]string{
|
||||
"kubernetes.io/ingress.global-static-ip-name": ns,
|
||||
"kubernetes.io/ingress.allow-http": "false",
|
||||
framework.IngressStaticIPKey: ns,
|
||||
framework.IngressAllowHTTPKey: "false",
|
||||
}, map[string]string{})
|
||||
|
||||
By("waiting for Ingress to come up with ip: " + ip)
|
||||
@@ -153,10 +162,244 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
// framework.ExpectNoError(jig.verifyURL(fmt.Sprintf("https://%v/", ip), "", 30, 1*time.Second, httpClient))
|
||||
})
|
||||
|
||||
It("should update ingress while sync failures occur on other ingresses", func() {
|
||||
By("Creating ingresses that would fail on sync.")
|
||||
ingFailTLSBackend := &extensions.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "ing-fail-on-tls-backend",
|
||||
},
|
||||
Spec: extensions.IngressSpec{
|
||||
TLS: []extensions.IngressTLS{
|
||||
{SecretName: "tls-secret-notexist"},
|
||||
},
|
||||
Backend: &extensions.IngressBackend{
|
||||
ServiceName: "echoheaders-notexist",
|
||||
ServicePort: intstr.IntOrString{
|
||||
Type: intstr.Int,
|
||||
IntVal: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err := jig.Client.ExtensionsV1beta1().Ingresses(ns).Create(ingFailTLSBackend)
|
||||
defer func() {
|
||||
if err := jig.Client.ExtensionsV1beta1().Ingresses(ns).Delete(ingFailTLSBackend.Name, nil); err != nil {
|
||||
framework.Logf("Failed to delete ingress %s: %v", ingFailTLSBackend.Name, err)
|
||||
}
|
||||
}()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
ingFailRules := &extensions.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "ing-fail-on-rules",
|
||||
},
|
||||
Spec: extensions.IngressSpec{
|
||||
Rules: []extensions.IngressRule{
|
||||
{
|
||||
Host: "foo.bar.com",
|
||||
IngressRuleValue: extensions.IngressRuleValue{
|
||||
HTTP: &extensions.HTTPIngressRuleValue{
|
||||
Paths: []extensions.HTTPIngressPath{
|
||||
{
|
||||
Path: "/foo",
|
||||
Backend: extensions.IngressBackend{
|
||||
ServiceName: "echoheaders-notexist",
|
||||
ServicePort: intstr.IntOrString{
|
||||
Type: intstr.Int,
|
||||
IntVal: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err = jig.Client.ExtensionsV1beta1().Ingresses(ns).Create(ingFailRules)
|
||||
defer func() {
|
||||
if err := jig.Client.ExtensionsV1beta1().Ingresses(ns).Delete(ingFailRules.Name, nil); err != nil {
|
||||
framework.Logf("Failed to delete ingress %s: %v", ingFailRules.Name, err)
|
||||
}
|
||||
}()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Creating a basic HTTP ingress and wait for it to come up")
|
||||
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http"), ns, nil, nil)
|
||||
jig.WaitForIngress(true)
|
||||
|
||||
By("Updating the path on ingress and wait for it to take effect")
|
||||
jig.Update(func(ing *extensions.Ingress) {
|
||||
updatedRule := extensions.IngressRule{
|
||||
Host: "ingress.test.com",
|
||||
IngressRuleValue: extensions.IngressRuleValue{
|
||||
HTTP: &extensions.HTTPIngressRuleValue{
|
||||
Paths: []extensions.HTTPIngressPath{
|
||||
{
|
||||
Path: "/test",
|
||||
// Copy backend from the first rule.
|
||||
Backend: ing.Spec.Rules[0].HTTP.Paths[0].Backend,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
// Replace the first rule.
|
||||
ing.Spec.Rules[0] = updatedRule
|
||||
})
|
||||
// Wait for change to take effect on the updated ingress.
|
||||
jig.WaitForIngress(false)
|
||||
})
|
||||
|
||||
It("should not reconcile manually modified health check for ingress", func() {
|
||||
By("Creating a basic HTTP ingress and wait for it to come up.")
|
||||
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http"), ns, nil, nil)
|
||||
jig.WaitForIngress(true)
|
||||
|
||||
// Get cluster UID.
|
||||
clusterID, err := framework.GetClusterID(f.ClientSet)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// Get the related nodeports.
|
||||
nodePorts := jig.GetIngressNodePorts(false)
|
||||
Expect(len(nodePorts)).ToNot(Equal(0))
|
||||
|
||||
// Filter health check using cluster UID as the suffix.
|
||||
By("Retrieving relevant health check resources from GCE.")
|
||||
gceCloud := gceController.Cloud.Provider.(*gcecloud.GCECloud)
|
||||
hcs, err := gceCloud.ListHealthChecks()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
var hcToChange *compute.HealthCheck
|
||||
for _, hc := range hcs {
|
||||
if strings.HasSuffix(hc.Name, clusterID) {
|
||||
Expect(hc.HttpHealthCheck).NotTo(BeNil())
|
||||
if fmt.Sprintf("%d", hc.HttpHealthCheck.Port) == nodePorts[0] {
|
||||
hcToChange = hc
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
Expect(hcToChange).NotTo(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Modifying health check %v without involving ingress.", hcToChange.Name))
|
||||
// Change timeout from 60s to 25s.
|
||||
hcToChange.TimeoutSec = 25
|
||||
// Change path from /healthz to /.
|
||||
hcToChange.HttpHealthCheck.RequestPath = "/"
|
||||
err = gceCloud.UpdateHealthCheck(hcToChange)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Add one more path to ingress to trigger resource syncing.
|
||||
By("Adding a new path to ingress and wait for it to take effect.")
|
||||
jig.Update(func(ing *extensions.Ingress) {
|
||||
ing.Spec.Rules = append(ing.Spec.Rules, extensions.IngressRule{
|
||||
Host: "ingress.test.com",
|
||||
IngressRuleValue: extensions.IngressRuleValue{
|
||||
HTTP: &extensions.HTTPIngressRuleValue{
|
||||
Paths: []extensions.HTTPIngressPath{
|
||||
{
|
||||
Path: "/test",
|
||||
// Copy backend from the first rule.
|
||||
Backend: ing.Spec.Rules[0].HTTP.Paths[0].Backend,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
})
|
||||
// Wait for change to take effect before checking the health check resource.
|
||||
jig.WaitForIngress(false)
|
||||
|
||||
// Validate the modified fields on health check are intact.
|
||||
By("Checking if the modified health check is unchanged.")
|
||||
hcAfterSync, err := gceCloud.GetHealthCheck(hcToChange.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(hcAfterSync.HttpHealthCheck).ToNot(Equal(nil))
|
||||
Expect(hcAfterSync.TimeoutSec).To(Equal(hcToChange.TimeoutSec))
|
||||
Expect(hcAfterSync.HttpHealthCheck.RequestPath).To(Equal(hcToChange.HttpHealthCheck.RequestPath))
|
||||
})
|
||||
|
||||
It("should create ingress with pre-shared certificate", func() {
|
||||
preSharedCertName := "test-pre-shared-cert"
|
||||
By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName))
|
||||
testHostname := "test.ingress.com"
|
||||
cert, key, err := framework.GenerateRSACerts(testHostname, true)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gceCloud, err := framework.GetGCECloud()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer func() {
|
||||
// We would not be able to delete the cert until ingress controller
|
||||
// cleans up the target proxy that references it.
|
||||
By("Deleting ingress before deleting ssl certificate")
|
||||
if jig.Ingress != nil {
|
||||
jig.TryDeleteIngress()
|
||||
}
|
||||
By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName))
|
||||
err := wait.Poll(framework.LoadBalancerPollInterval, framework.LoadBalancerCleanupTimeout, func() (bool, error) {
|
||||
if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !errors.IsNotFound(err) {
|
||||
framework.Logf("Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to delete ssl certificate %q: %v", preSharedCertName, err))
|
||||
}()
|
||||
_, err = gceCloud.CreateSslCertificate(&compute.SslCertificate{
|
||||
Name: preSharedCertName,
|
||||
Certificate: string(cert),
|
||||
PrivateKey: string(key),
|
||||
Description: "pre-shared cert for ingress testing",
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create ssl certificate %q: %v", preSharedCertName, err))
|
||||
|
||||
By("Creating an ingress referencing the pre-shared certificate")
|
||||
// Create an ingress referencing this cert using pre-shared-cert annotation.
|
||||
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "pre-shared-cert"), ns, map[string]string{
|
||||
framework.IngressPreSharedCertKey: preSharedCertName,
|
||||
framework.IngressAllowHTTPKey: "false",
|
||||
}, map[string]string{})
|
||||
|
||||
By("Test that ingress works with the pre-shared certificate")
|
||||
err = jig.WaitForIngressWithCert(true, []string{testHostname}, cert)
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
|
||||
})
|
||||
|
||||
It("should create ingress with backside re-encryption", func() {
|
||||
By("Creating a set of ingress, service and deployment that have backside re-encryption configured")
|
||||
deployCreated, svcCreated, ingCreated, err := framework.CreateReencryptionIngress(f.ClientSet, f.Namespace.Name)
|
||||
defer func() {
|
||||
By("Cleaning up re-encryption ingress, service and deployment")
|
||||
if errs := framework.CleanupReencryptionIngress(f.ClientSet, deployCreated, svcCreated, ingCreated); len(errs) > 0 {
|
||||
framework.Failf("Failed to cleanup re-encryption ingress: %v", errs)
|
||||
}
|
||||
}()
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to create re-encryption ingress")
|
||||
|
||||
By(fmt.Sprintf("Waiting for ingress %s to come up", ingCreated.Name))
|
||||
ingIP, err := jig.WaitForIngressAddress(f.ClientSet, f.Namespace.Name, ingCreated.Name, framework.LoadBalancerPollTimeout)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to wait for ingress IP")
|
||||
|
||||
By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP))
|
||||
timeoutClient := &http.Client{Timeout: framework.IngressReqTimeout}
|
||||
err = wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) {
|
||||
resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "")
|
||||
if err != nil {
|
||||
framework.Logf("SimpleGET failed: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
if !strings.Contains(resp, "request_scheme=https") {
|
||||
return false, fmt.Errorf("request wasn't served by HTTPS, response body: %s", resp)
|
||||
}
|
||||
framework.Logf("Poll succeeded, request was served by HTTPS")
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to verify backside re-encryption ingress")
|
||||
})
|
||||
|
||||
It("multicluster ingress should get instance group annotation", func() {
|
||||
name := "echomap"
|
||||
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "http"), ns, map[string]string{
|
||||
framework.IngressClass: framework.MulticlusterIngressClassValue,
|
||||
framework.IngressClassKey: framework.MulticlusterIngressClassValue,
|
||||
}, map[string]string{})
|
||||
|
||||
By(fmt.Sprintf("waiting for Ingress %s to come up", name))
|
||||
@@ -179,6 +422,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
// TODO: Implement a multizone e2e that verifies traffic reaches each
|
||||
// zone based on pod labels.
|
||||
})
|
||||
|
||||
Describe("GCE [Slow] [Feature:NEG]", func() {
|
||||
var gceController *framework.GCEIngressController
|
||||
|
||||
@@ -191,7 +435,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
Client: jig.Client,
|
||||
Cloud: framework.TestContext.CloudConfig,
|
||||
}
|
||||
gceController.Init()
|
||||
err := gceController.Init()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
// Platform specific cleanup
|
||||
@@ -207,7 +452,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
jig.TryDeleteIngress()
|
||||
|
||||
By("Cleaning up cloud resources")
|
||||
framework.CleanupGCEIngressController(gceController)
|
||||
Expect(gceController.CleanupGCEIngressController()).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should conform to Ingress spec", func() {
|
||||
@@ -272,12 +517,12 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(ns).UpdateScale(name, scale)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
wait.Poll(5*time.Second, NEGUpdateTimeout, func() (bool, error) {
|
||||
wait.Poll(10*time.Second, NEGUpdateTimeout, func() (bool, error) {
|
||||
res, err := jig.GetDistinctResponseFromIngress()
|
||||
if err != nil {
|
||||
return false, err
|
||||
return false, nil
|
||||
}
|
||||
return res.Len() == num, err
|
||||
return res.Len() == num, nil
|
||||
})
|
||||
}
|
||||
|
||||
@@ -319,12 +564,12 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
scale.Spec.Replicas = int32(replicas)
|
||||
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(ns).UpdateScale(name, scale)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
|
||||
wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
|
||||
res, err := jig.GetDistinctResponseFromIngress()
|
||||
if err != nil {
|
||||
return false, err
|
||||
return false, nil
|
||||
}
|
||||
return res.Len() == replicas, err
|
||||
return res.Len() == replicas, nil
|
||||
})
|
||||
|
||||
By("Trigger rolling update and observe service disruption")
|
||||
@@ -335,7 +580,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
deploy.Spec.Template.Spec.TerminationGracePeriodSeconds = &gracePeriod
|
||||
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(ns).Update(deploy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
wait.Poll(30*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
|
||||
wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
|
||||
res, err := jig.GetDistinctResponseFromIngress()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
deploy, err := f.ClientSet.ExtensionsV1beta1().Deployments(ns).Get(name, metav1.GetOptions{})
|
||||
@@ -356,6 +601,38 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
})
|
||||
})
|
||||
|
||||
Describe("GCE [Slow] [Feature:kubemci]", func() {
|
||||
// Platform specific setup
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
jig.Class = framework.MulticlusterIngressClassValue
|
||||
})
|
||||
|
||||
// Platform specific cleanup
|
||||
AfterEach(func() {
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
framework.DescribeIng(ns)
|
||||
}
|
||||
if jig.Ingress == nil {
|
||||
By("No ingress created, no cleanup necessary")
|
||||
return
|
||||
}
|
||||
By("Deleting ingress")
|
||||
jig.TryDeleteIngress()
|
||||
})
|
||||
|
||||
It("should conform to Ingress spec", func() {
|
||||
jig.PollInterval = 5 * time.Second
|
||||
conformanceTests = framework.CreateIngressComformanceTests(jig, ns, map[string]string{})
|
||||
for _, t := range conformanceTests {
|
||||
By(t.EntryLog)
|
||||
t.Execute()
|
||||
By(t.ExitLog)
|
||||
jig.WaitForIngress(true /*waitForNodePort*/)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
// Time: borderline 5m, slow by design
|
||||
Describe("[Slow] Nginx", func() {
|
||||
var nginxController *framework.NginxIngressController
|
||||
|
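Editor's note on the ingress.go hunks above: besides the new sync-failure, health-check, pre-shared-cert, re-encryption and kubemci cases, GCEIngressController.Init and CleanupGCEIngressController now return errors instead of failing internally, so the suite asserts on them. Below is a sketch of the resulting setup/teardown pattern pieced together from the hunks; only the struct fields visible in the diff are shown, and other fields the controller may need are omitted.

// Sketch only: error-checked controller lifecycle after this change.
gceController := &framework.GCEIngressController{
	Client: jig.Client,
	Cloud:  framework.TestContext.CloudConfig,
}
Expect(gceController.Init()).NotTo(HaveOccurred())

// ... run the ingress conformance tests ...

By("Cleaning up cloud resources")
Expect(gceController.CleanupGCEIngressController()).NotTo(HaveOccurred())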
vendor/k8s.io/kubernetes/test/e2e/network/ingress_scale.go (generated, vendored, new file): 64 lines changed

@@ -0,0 +1,64 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package network

import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/network/scale"

. "github.com/onsi/ginkgo"
)

var _ = SIGDescribe("Loadbalancing: L7 Scalability", func() {
defer GinkgoRecover()
var (
ns string
)
f := framework.NewDefaultFramework("ingress-scale")

BeforeEach(func() {
ns = f.Namespace.Name
})

Describe("GCE [Slow] [Serial] [Feature:IngressScale]", func() {
var (
scaleFramework *scale.IngressScaleFramework
)

BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")

scaleFramework = scale.NewIngressScaleFramework(f.ClientSet, ns, framework.TestContext.CloudConfig)
if err := scaleFramework.PrepareScaleTest(); err != nil {
framework.Failf("Unexpected error while preparing ingress scale test: %v", err)
}
})

AfterEach(func() {
if errs := scaleFramework.CleanupScaleTest(); len(errs) != 0 {
framework.Failf("Unexpected error while cleaning up ingress scale test: %v", errs)
}
})

It("Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses", func() {
if errs := scaleFramework.RunScaleTest(); len(errs) != 0 {
framework.Failf("Unexpected error while running ingress scale test: %v", errs)
}

})
})
})
vendor/k8s.io/kubernetes/test/e2e/network/kube_proxy.go (generated, vendored): 10 lines changed

@@ -171,19 +171,19 @@ var _ = SIGDescribe("Network", func() {
// If test flakes occur here, then this check should be performed
// in a loop as there may be a race with the client connecting.
framework.IssueSSHCommandWithResult(
fmt.Sprintf("sudo cat /proc/net/ip_conntrack | grep 'dport=%v'",
fmt.Sprintf("sudo cat /proc/net/nf_conntrack | grep 'dport=%v'",
testDaemonTcpPort),
framework.TestContext.Provider,
clientNodeInfo.node)

// Timeout in seconds is available as the third column from
// /proc/net/ip_conntrack.
// Timeout in seconds is available as the fifth column from
// /proc/net/nf_conntrack.
result, err := framework.IssueSSHCommandWithResult(
fmt.Sprintf(
"sudo cat /proc/net/ip_conntrack "+
"sudo cat /proc/net/nf_conntrack "+
"| grep 'CLOSE_WAIT.*dst=%v.*dport=%v' "+
"| tail -n 1"+
"| awk '{print $3}' ",
"| awk '{print $5}' ",
serverNodeInfo.nodeIp,
testDaemonTcpPort),
framework.TestContext.Provider,
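Editor's note on the kube_proxy.go hunks above: the test switches from the legacy /proc/net/ip_conntrack format to /proc/net/nf_conntrack. nf_conntrack lines start with the layer-3 and layer-4 protocol names and numbers, so the entry timeout moves from the third to the fifth whitespace-separated field, which is why the awk column changes from $3 to $5. A sketch of the assembled command, with an illustrative (not captured) conntrack line:

// Sketch only: the shell pipeline the test now issues over SSH. A sample
// nf_conntrack entry looks roughly like
//   ipv4  2 tcp  6  3586 CLOSE_WAIT src=10.0.0.5 dst=10.0.0.7 sport=33306 dport=11302 ...
// so the timeout in seconds is field 5.
cmd := fmt.Sprintf(
	"sudo cat /proc/net/nf_conntrack "+
		"| grep 'CLOSE_WAIT.*dst=%v.*dport=%v' "+
		"| tail -n 1"+
		"| awk '{print $5}' ",
	serverNodeInfo.nodeIp, testDaemonTcpPort)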
vendor/k8s.io/kubernetes/test/e2e/network/network_tiers.go (generated, vendored): 29 lines changed
@@ -26,9 +26,9 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
@@ -39,14 +39,12 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
|
||||
f := framework.NewDefaultFramework("services")
|
||||
|
||||
var cs clientset.Interface
|
||||
var internalClientset internalclientset.Interface
|
||||
serviceLBNames := []string{}
|
||||
|
||||
BeforeEach(func() {
|
||||
// This test suite requires the GCE environment.
|
||||
framework.SkipUnlessProviderIs("gce")
|
||||
cs = f.ClientSet
|
||||
internalClientset = f.InternalClientset
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
@@ -75,12 +73,12 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
|
||||
By("creating a Service of type LoadBalancer using the standard network tier")
|
||||
svc := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) {
|
||||
svc.Spec.Type = v1.ServiceTypeLoadBalancer
|
||||
setNetworkTier(svc, gcecloud.NetworkTierAnnotationStandard)
|
||||
setNetworkTier(svc, gcecloud.NetworkTierAnnotationStandard.ToGCEValue())
|
||||
})
|
||||
// Verify that service has been updated properly.
|
||||
svcTier, err := gcecloud.GetServiceNetworkTier(svc)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(svcTier).To(Equal(gcecloud.NetworkTierStandard))
|
||||
Expect(svcTier).To(Equal(cloud.NetworkTierStandard))
|
||||
// Record the LB name for test cleanup.
|
||||
serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc))
|
||||
|
||||
@@ -95,7 +93,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
|
||||
// Verify that service has been updated properly.
|
||||
svcTier, err = gcecloud.GetServiceNetworkTier(svc)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(svcTier).To(Equal(gcecloud.NetworkTierDefault))
|
||||
Expect(svcTier).To(Equal(cloud.NetworkTierDefault))
|
||||
|
||||
// Wait until the ingress IP changes. Each tier has its own pool of
|
||||
// IPs, so changing tiers implies changing IPs.
|
||||
@@ -104,12 +102,14 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
|
||||
// Test 3: create a standard-tiered LB with a user-requested IP.
|
||||
By("reserving a static IP for the load balancer")
|
||||
requestedAddrName := fmt.Sprintf("e2e-ext-lb-net-tier-%s", framework.RunId)
|
||||
requestedIP, err := reserveAlphaRegionalAddress(requestedAddrName, gcecloud.NetworkTierStandard)
|
||||
gceCloud, err := framework.GetGCECloud()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
requestedIP, err := reserveAlphaRegionalAddress(gceCloud, requestedAddrName, cloud.NetworkTierStandard)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to reserve a STANDARD tiered address")
|
||||
defer func() {
|
||||
if requestedAddrName != "" {
|
||||
// Release GCE static address - this is not kube-managed and will not be automatically released.
|
||||
if err := framework.DeleteGCEStaticIP(requestedAddrName); err != nil {
|
||||
if err := gceCloud.DeleteRegionAddress(requestedAddrName, gceCloud.Region()); err != nil {
|
||||
framework.Logf("failed to release static IP address %q: %v", requestedAddrName, err)
|
||||
}
|
||||
}
|
||||
@ -120,13 +120,13 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
|
||||
By("updating the Service to use the standard tier with a requested IP")
|
||||
svc = jig.UpdateServiceOrFail(ns, svc.Name, func(svc *v1.Service) {
|
||||
svc.Spec.LoadBalancerIP = requestedIP
|
||||
setNetworkTier(svc, gcecloud.NetworkTierAnnotationStandard)
|
||||
setNetworkTier(svc, gcecloud.NetworkTierAnnotationStandard.ToGCEValue())
|
||||
})
|
||||
// Verify that service has been updated properly.
|
||||
Expect(svc.Spec.LoadBalancerIP).To(Equal(requestedIP))
|
||||
svcTier, err = gcecloud.GetServiceNetworkTier(svc)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(svcTier).To(Equal(gcecloud.NetworkTierStandard))
|
||||
Expect(svcTier).To(Equal(cloud.NetworkTierStandard))
|
||||
|
||||
// Wait until the ingress IP changes and verifies the LB.
|
||||
ingressIP = waitAndVerifyLBWithTier(jig, ns, svcName, ingressIP, createTimeout, lagTimeout)
|
||||
@ -169,7 +169,7 @@ func waitAndVerifyLBWithTier(jig *framework.ServiceTestJig, ns, svcName, existin
|
||||
return ingressIP
|
||||
}
|
||||
|
||||
func getLBNetworkTierByIP(ip string) (gcecloud.NetworkTier, error) {
|
||||
func getLBNetworkTierByIP(ip string) (cloud.NetworkTier, error) {
|
||||
var rule *computealpha.ForwardingRule
|
||||
// Retry a few times to tolerate flakes.
|
||||
err := wait.PollImmediate(5*time.Second, 15*time.Second, func() (bool, error) {
|
||||
@ -183,7 +183,7 @@ func getLBNetworkTierByIP(ip string) (gcecloud.NetworkTier, error) {
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return gcecloud.NetworkTierGCEValueToType(rule.NetworkTier), nil
|
||||
return cloud.NetworkTierGCEValueToType(rule.NetworkTier), nil
|
||||
}
|
||||
|
||||
func getGCEForwardingRuleByIP(ip string) (*computealpha.ForwardingRule, error) {
|
||||
@ -195,7 +195,7 @@ func getGCEForwardingRuleByIP(ip string) (*computealpha.ForwardingRule, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, rule := range ruleList.Items {
|
||||
for _, rule := range ruleList {
|
||||
if rule.IPAddress == ip {
|
||||
return rule, nil
|
||||
}
|
||||
@ -221,8 +221,7 @@ func clearNetworkTier(svc *v1.Service) {
|
||||
|
||||
// TODO: add retries if this turns out to be flaky.
|
||||
// TODO(#51665): remove this helper function once Network Tiers becomes beta.
|
||||
func reserveAlphaRegionalAddress(name string, netTier gcecloud.NetworkTier) (string, error) {
|
||||
cloud, err := framework.GetGCECloud()
|
||||
func reserveAlphaRegionalAddress(cloud *gcecloud.GCECloud, name string, netTier cloud.NetworkTier) (string, error) {
|
||||
alphaAddr := &computealpha.Address{
|
||||
Name: name,
|
||||
NetworkTier: netTier.ToGCEValue(),
|
||||
|
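The network_tiers.go changes above move the NetworkTier type from the gcecloud package into the gce/cloud package and pass ToGCEValue() when annotating the Service. As a hedged sketch of what setNetworkTier amounts to: it writes the tier onto the Service annotations; the annotation key below is an assumption (the real constant lives in the GCE cloud provider and is not shown in this diff).

// Sketch only: annotate a Service with a GCE network tier value.
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

const networkTierAnnotationKey = "cloud.google.com/network-tier" // assumed key

func setNetworkTierSketch(svc *v1.Service, tier string) {
	if svc.ObjectMeta.Annotations == nil {
		svc.ObjectMeta.Annotations = map[string]string{}
	}
	svc.ObjectMeta.Annotations[networkTierAnnotationKey] = tier // e.g. "Standard"
}

func main() {
	svc := &v1.Service{}
	setNetworkTierSketch(svc, "Standard")
	fmt.Println(svc.Annotations)
}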
4
vendor/k8s.io/kubernetes/test/e2e/network/networking_perf.go
generated
vendored
@ -149,9 +149,9 @@ func networkingIPerfTest(isIPv6 bool) {
}
})
}
fmt.Println("[begin] Node,Bandwith CSV")
fmt.Println("[begin] Node,Bandwidth CSV")
fmt.Println(iperfResults.ToTSV())
fmt.Println("[end] Node,Bandwith CSV")
fmt.Println("[end] Node,Bandwidth CSV")

for ipClient, bandwidth := range iperfResults.BandwidthMap {
framework.Logf("%v had bandwidth %v. Ratio to expected (%v) was %f", ipClient, bandwidth, expectedBandwidth, float64(bandwidth)/float64(expectedBandwidth))
45
vendor/k8s.io/kubernetes/test/e2e/network/proxy.go
generated
vendored
@ -61,22 +61,6 @@ var _ = SIGDescribe("Proxy", func() {
|
||||
f := framework.NewFramework("proxy", options, nil)
|
||||
prefix := "/api/" + version
|
||||
|
||||
// Port here has to be kept in sync with default kubelet port.
|
||||
/*
|
||||
Testname: proxy-prefix-node-logs-port
|
||||
Description: Ensure that proxy on node logs works with generic top
|
||||
level prefix proxy and explicit kubelet port.
|
||||
*/
|
||||
framework.ConformanceIt("should proxy logs on node with explicit kubelet port ", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", ":10250/logs/") })
|
||||
|
||||
/*
|
||||
Testname: proxy-prefix-node-logs
|
||||
Description: Ensure that proxy on node logs works with generic top
|
||||
level prefix proxy.
|
||||
*/
|
||||
framework.ConformanceIt("should proxy logs on node ", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", "/logs/") })
|
||||
It("should proxy to cadvisor", func() { nodeProxyTest(f, prefix+"/proxy/nodes/", ":4194/containers/") })
|
||||
|
||||
/*
|
||||
Testname: proxy-subresource-node-logs-port
|
||||
Description: Ensure that proxy on node logs works with node proxy
|
||||
@ -185,36 +169,15 @@ var _ = SIGDescribe("Proxy", func() {
|
||||
|
||||
// table constructors
|
||||
// Try proxying through the service and directly to through the pod.
|
||||
svcProxyURL := func(scheme, port string) string {
|
||||
return prefix + "/proxy/namespaces/" + f.Namespace.Name + "/services/" + net.JoinSchemeNamePort(scheme, service.Name, port)
|
||||
}
|
||||
subresourceServiceProxyURL := func(scheme, port string) string {
|
||||
return prefix + "/namespaces/" + f.Namespace.Name + "/services/" + net.JoinSchemeNamePort(scheme, service.Name, port) + "/proxy"
|
||||
}
|
||||
podProxyURL := func(scheme, port string) string {
|
||||
return prefix + "/proxy/namespaces/" + f.Namespace.Name + "/pods/" + net.JoinSchemeNamePort(scheme, pods[0].Name, port)
|
||||
}
|
||||
subresourcePodProxyURL := func(scheme, port string) string {
|
||||
return prefix + "/namespaces/" + f.Namespace.Name + "/pods/" + net.JoinSchemeNamePort(scheme, pods[0].Name, port) + "/proxy"
|
||||
}
|
||||
|
||||
// construct the table
|
||||
expectations := map[string]string{
|
||||
svcProxyURL("", "portname1") + "/": "foo",
|
||||
svcProxyURL("", "80") + "/": "foo",
|
||||
svcProxyURL("", "portname2") + "/": "bar",
|
||||
svcProxyURL("", "81") + "/": "bar",
|
||||
|
||||
svcProxyURL("http", "portname1") + "/": "foo",
|
||||
svcProxyURL("http", "80") + "/": "foo",
|
||||
svcProxyURL("http", "portname2") + "/": "bar",
|
||||
svcProxyURL("http", "81") + "/": "bar",
|
||||
|
||||
svcProxyURL("https", "tlsportname1") + "/": "tls baz",
|
||||
svcProxyURL("https", "443") + "/": "tls baz",
|
||||
svcProxyURL("https", "tlsportname2") + "/": "tls qux",
|
||||
svcProxyURL("https", "444") + "/": "tls qux",
|
||||
|
||||
subresourceServiceProxyURL("", "portname1") + "/": "foo",
|
||||
subresourceServiceProxyURL("http", "portname1") + "/": "foo",
|
||||
subresourceServiceProxyURL("", "portname2") + "/": "bar",
|
||||
@ -222,14 +185,6 @@ var _ = SIGDescribe("Proxy", func() {
|
||||
subresourceServiceProxyURL("https", "tlsportname1") + "/": "tls baz",
|
||||
subresourceServiceProxyURL("https", "tlsportname2") + "/": "tls qux",
|
||||
|
||||
podProxyURL("", "1080") + "/": `<a href="` + podProxyURL("", "1080") + `/rewriteme">test</a>`,
|
||||
podProxyURL("", "160") + "/": "foo",
|
||||
podProxyURL("", "162") + "/": "bar",
|
||||
|
||||
podProxyURL("http", "1080") + "/": `<a href="` + podProxyURL("http", "1080") + `/rewriteme">test</a>`,
|
||||
podProxyURL("http", "160") + "/": "foo",
|
||||
podProxyURL("http", "162") + "/": "bar",
|
||||
|
||||
subresourcePodProxyURL("", "") + "/": `<a href="` + subresourcePodProxyURL("", "") + `/rewriteme">test</a>`,
|
||||
subresourcePodProxyURL("", "1080") + "/": `<a href="` + subresourcePodProxyURL("", "1080") + `/rewriteme">test</a>`,
|
||||
subresourcePodProxyURL("http", "1080") + "/": `<a href="` + subresourcePodProxyURL("http", "1080") + `/rewriteme">test</a>`,
|
||||
|
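The proxy.go hunks above exercise both the legacy top-level /proxy prefix and the pod/service proxy subresource. As a rough sketch only, the two URL shapes being compared, using plain string formatting instead of the test's helpers (names and sample values below are illustrative, not from the diff):

// Sketch only: legacy proxy URL vs. proxy-subresource URL for a pod port.
package main

import "fmt"

func proxyURLs(ns, pod, port string) (legacy, subresource string) {
	legacy = fmt.Sprintf("/api/v1/proxy/namespaces/%s/pods/%s:%s", ns, pod, port)
	subresource = fmt.Sprintf("/api/v1/namespaces/%s/pods/%s:%s/proxy", ns, pod, port)
	return
}

func main() {
	l, s := proxyURLs("default", "example-pod", "1080")
	fmt.Println(l)
	fmt.Println(s)
}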
33
vendor/k8s.io/kubernetes/test/e2e/network/scale/BUILD
generated
vendored
Normal file
@ -0,0 +1,33 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["ingress.go"],
importpath = "k8s.io/kubernetes/test/e2e/network/scale",
visibility = ["//visibility:public"],
deps = [
"//test/e2e/framework:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//test/e2e/network/scale/localrun:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
463
vendor/k8s.io/kubernetes/test/e2e/network/scale/ingress.go
generated
vendored
Normal file
@ -0,0 +1,463 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scale

import (
"fmt"
"io/ioutil"
"sync"
"time"

"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
clientset "k8s.io/client-go/kubernetes"

"k8s.io/kubernetes/test/e2e/framework"
)

const (
numIngressesSmall = 5
numIngressesMedium = 20
numIngressesLarge = 50
numIngressesExtraLarge = 99

scaleTestIngressNamePrefix = "ing-scale"
scaleTestBackendName = "echoheaders-scale"
scaleTestSecretName = "tls-secret-scale"
scaleTestHostname = "scale.ingress.com"
scaleTestNumBackends = 10
scaleTestPollInterval = 15 * time.Second

// We don't expect waitForIngress to take longer
// than waitForIngressMaxTimeout.
waitForIngressMaxTimeout = 80 * time.Minute
ingressesCleanupTimeout = 80 * time.Minute
)

var (
scaleTestLabels = map[string]string{
"app": scaleTestBackendName,
}
)

// IngressScaleFramework defines the framework for ingress scale testing.
type IngressScaleFramework struct {
Clientset clientset.Interface
Jig *framework.IngressTestJig
GCEController *framework.GCEIngressController
CloudConfig framework.CloudConfig
Logger framework.TestLogger

Namespace string
EnableTLS bool
NumIngressesTest []int
OutputFile string

ScaleTestDeploy *extensions.Deployment
ScaleTestSvcs []*v1.Service
ScaleTestIngs []*extensions.Ingress

// BatchCreateLatencies stores all ingress creation latencies, in different
// batches.
BatchCreateLatencies [][]time.Duration
// BatchDurations stores the total duration for each ingress batch creation.
BatchDurations []time.Duration
// StepCreateLatencies stores the single ingress creation latency, which happens
// after each ingress batch creation is complete.
StepCreateLatencies []time.Duration
// StepCreateLatencies stores the single ingress update latency, which happens
// after each ingress batch creation is complete.
StepUpdateLatencies []time.Duration
}

// NewIngressScaleFramework returns a new framework for ingress scale testing.
func NewIngressScaleFramework(cs clientset.Interface, ns string, cloudConfig framework.CloudConfig) *IngressScaleFramework {
return &IngressScaleFramework{
Namespace: ns,
Clientset: cs,
CloudConfig: cloudConfig,
Logger: &framework.E2ELogger{},
EnableTLS: true,
NumIngressesTest: []int{
numIngressesSmall,
numIngressesMedium,
numIngressesLarge,
numIngressesExtraLarge,
},
}
}

// PrepareScaleTest prepares framework for ingress scale testing.
func (f *IngressScaleFramework) PrepareScaleTest() error {
f.Logger.Infof("Initializing ingress test suite and gce controller...")
f.Jig = framework.NewIngressTestJig(f.Clientset)
f.Jig.Logger = f.Logger
f.Jig.PollInterval = scaleTestPollInterval
f.GCEController = &framework.GCEIngressController{
Client: f.Clientset,
Cloud: f.CloudConfig,
}
if err := f.GCEController.Init(); err != nil {
return fmt.Errorf("Failed to initialize GCE controller: %v", err)
}

f.ScaleTestSvcs = []*v1.Service{}
f.ScaleTestIngs = []*extensions.Ingress{}

return nil
}

// CleanupScaleTest cleans up framework for ingress scale testing.
func (f *IngressScaleFramework) CleanupScaleTest() []error {
var errs []error

f.Logger.Infof("Cleaning up ingresses...")
for _, ing := range f.ScaleTestIngs {
if ing != nil {
if err := f.Clientset.ExtensionsV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil); err != nil {
errs = append(errs, fmt.Errorf("Error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err))
}
}
}
f.Logger.Infof("Cleaning up services...")
for _, svc := range f.ScaleTestSvcs {
if svc != nil {
if err := f.Clientset.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil); err != nil {
errs = append(errs, fmt.Errorf("Error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err))
}
}
}
if f.ScaleTestDeploy != nil {
f.Logger.Infof("Cleaning up deployment %s...", f.ScaleTestDeploy.Name)
if err := f.Clientset.ExtensionsV1beta1().Deployments(f.ScaleTestDeploy.Namespace).Delete(f.ScaleTestDeploy.Name, nil); err != nil {
errs = append(errs, fmt.Errorf("Error while delting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err))
}
}

f.Logger.Infof("Cleaning up cloud resources...")
if err := f.GCEController.CleanupGCEIngressControllerWithTimeout(ingressesCleanupTimeout); err != nil {
errs = append(errs, err)
}

return errs
}

// RunScaleTest runs ingress scale testing.
func (f *IngressScaleFramework) RunScaleTest() []error {
var errs []error

testDeploy := generateScaleTestBackendDeploymentSpec(scaleTestNumBackends)
f.Logger.Infof("Creating deployment %s...", testDeploy.Name)
testDeploy, err := f.Jig.Client.ExtensionsV1beta1().Deployments(f.Namespace).Create(testDeploy)
if err != nil {
errs = append(errs, fmt.Errorf("Failed to create deployment %s: %v", testDeploy.Name, err))
return errs
}
f.ScaleTestDeploy = testDeploy

if f.EnableTLS {
f.Logger.Infof("Ensuring TLS secret %s...", scaleTestSecretName)
if err := f.Jig.PrepareTLSSecret(f.Namespace, scaleTestSecretName, scaleTestHostname); err != nil {
errs = append(errs, fmt.Errorf("Failed to prepare TLS secret %s: %v", scaleTestSecretName, err))
return errs
}
}

// currentNum keeps track of how many ingresses have been created.
currentNum := new(int)

prepareIngsFunc := func(goalNum int) {
var ingWg sync.WaitGroup
numToCreate := goalNum - *currentNum
ingWg.Add(numToCreate)
errQueue := make(chan error, numToCreate)
latencyQueue := make(chan time.Duration, numToCreate)
start := time.Now()
for ; *currentNum < goalNum; *currentNum++ {
suffix := fmt.Sprintf("%d", *currentNum)
go func() {
defer ingWg.Done()

start := time.Now()
svcCreated, ingCreated, err := f.createScaleTestServiceIngress(suffix, f.EnableTLS)
f.ScaleTestSvcs = append(f.ScaleTestSvcs, svcCreated)
f.ScaleTestIngs = append(f.ScaleTestIngs, ingCreated)
if err != nil {
errQueue <- err
return
}
f.Logger.Infof("Waiting for ingress %s to come up...", ingCreated.Name)
if err := f.Jig.WaitForGivenIngressWithTimeout(ingCreated, false, waitForIngressMaxTimeout); err != nil {
errQueue <- err
return
}
elapsed := time.Since(start)
f.Logger.Infof("Spent %s for ingress %s to come up", elapsed, ingCreated.Name)
latencyQueue <- elapsed
}()
}

// Wait until all ingress creations are complete.
f.Logger.Infof("Waiting for %d ingresses to come up...", numToCreate)
ingWg.Wait()
close(errQueue)
close(latencyQueue)
elapsed := time.Since(start)
var createLatencies []time.Duration
for latency := range latencyQueue {
createLatencies = append(createLatencies, latency)
}
f.BatchCreateLatencies = append(f.BatchCreateLatencies, createLatencies)
if len(errQueue) != 0 {
f.Logger.Errorf("Failed while creating services and ingresses, spent %v", elapsed)
for err := range errQueue {
errs = append(errs, err)
}
return
}
f.Logger.Infof("Spent %s for %d ingresses to come up", elapsed, numToCreate)
f.BatchDurations = append(f.BatchDurations, elapsed)
}

measureCreateUpdateFunc := func() {
f.Logger.Infof("Create one more ingress and wait for it to come up")
start := time.Now()
svcCreated, ingCreated, err := f.createScaleTestServiceIngress(fmt.Sprintf("%d", *currentNum), f.EnableTLS)
*currentNum = *currentNum + 1
f.ScaleTestSvcs = append(f.ScaleTestSvcs, svcCreated)
f.ScaleTestIngs = append(f.ScaleTestIngs, ingCreated)
if err != nil {
errs = append(errs, err)
return
}

f.Logger.Infof("Waiting for ingress %s to come up...", ingCreated.Name)
if err := f.Jig.WaitForGivenIngressWithTimeout(ingCreated, false, waitForIngressMaxTimeout); err != nil {
errs = append(errs, err)
return
}
elapsed := time.Since(start)
f.Logger.Infof("Spent %s for ingress %s to come up", elapsed, ingCreated.Name)
f.StepCreateLatencies = append(f.StepCreateLatencies, elapsed)

f.Logger.Infof("Updating ingress and wait for change to take effect")
ingToUpdate, err := f.Clientset.ExtensionsV1beta1().Ingresses(f.Namespace).Get(ingCreated.Name, metav1.GetOptions{})
if err != nil {
errs = append(errs, err)
return
}
addTestPathToIngress(ingToUpdate)
start = time.Now()
ingToUpdate, err = f.Clientset.ExtensionsV1beta1().Ingresses(f.Namespace).Update(ingToUpdate)
if err != nil {
errs = append(errs, err)
return
}

if err := f.Jig.WaitForGivenIngressWithTimeout(ingToUpdate, false, waitForIngressMaxTimeout); err != nil {
errs = append(errs, err)
return
}
elapsed = time.Since(start)
f.Logger.Infof("Spent %s for updating ingress %s", elapsed, ingToUpdate.Name)
f.StepUpdateLatencies = append(f.StepUpdateLatencies, elapsed)
}

defer f.dumpLatencies()

for _, num := range f.NumIngressesTest {
f.Logger.Infof("Create more ingresses until we reach %d ingresses", num)
prepareIngsFunc(num)
f.Logger.Infof("Measure create and update latency with %d ingresses", num)
measureCreateUpdateFunc()

if len(errs) != 0 {
return errs
}
}

return errs
}

func (f *IngressScaleFramework) dumpLatencies() {
f.Logger.Infof("Dumping scale test latencies...")
formattedData := f.GetFormattedLatencies()
if f.OutputFile != "" {
f.Logger.Infof("Dumping scale test latencies to file %s...", f.OutputFile)
ioutil.WriteFile(f.OutputFile, []byte(formattedData), 0644)
return
}
f.Logger.Infof("\n%v", formattedData)
}

// GetFormattedLatencies returns the formatted latencies output.
// TODO: Need a better way/format for data output.
func (f *IngressScaleFramework) GetFormattedLatencies() string {
if len(f.NumIngressesTest) == 0 ||
len(f.NumIngressesTest) != len(f.BatchCreateLatencies) ||
len(f.NumIngressesTest) != len(f.BatchDurations) ||
len(f.NumIngressesTest) != len(f.StepCreateLatencies) ||
len(f.NumIngressesTest) != len(f.StepUpdateLatencies) {
return "Failed to construct latencies output."
}

res := "--- Procedure logs ---\n"
for i, latencies := range f.BatchCreateLatencies {
res += fmt.Sprintf("Create %d ingresses parallelly, each of them takes below amount of time before starts serving traffic:\n", len(latencies))
for _, latency := range latencies {
res = res + fmt.Sprintf("- %v\n", latency)
}
res += fmt.Sprintf("Total duration for completing %d ingress creations: %v\n", len(latencies), f.BatchDurations[i])
res += fmt.Sprintf("Duration to create one more ingress with %d ingresses existing: %v\n", f.NumIngressesTest[i], f.StepCreateLatencies[i])
res += fmt.Sprintf("Duration to update one ingress with %d ingresses existing: %v\n", f.NumIngressesTest[i]+1, f.StepUpdateLatencies[i])
}
res = res + "--- Summary ---\n"
var batchTotalStr, batchAvgStr, singleCreateStr, singleUpdateStr string
for i, latencies := range f.BatchCreateLatencies {
batchTotalStr += fmt.Sprintf("Batch creation total latency for %d ingresses with %d ingresses existing: %v\n", len(latencies), f.NumIngressesTest[i]-len(latencies), f.BatchDurations[i])
var avgLatency time.Duration
for _, latency := range latencies {
avgLatency = avgLatency + latency
}
avgLatency /= time.Duration(len(latencies))
batchAvgStr += fmt.Sprintf("Batch creation average latency for %d ingresses with %d ingresses existing: %v\n", len(latencies), f.NumIngressesTest[i]-len(latencies), avgLatency)
singleCreateStr += fmt.Sprintf("Single ingress creation latency with %d ingresses existing: %v\n", f.NumIngressesTest[i], f.StepCreateLatencies[i])
singleUpdateStr += fmt.Sprintf("Single ingress update latency with %d ingresses existing: %v\n", f.NumIngressesTest[i]+1, f.StepUpdateLatencies[i])
}
res += batchTotalStr + batchAvgStr + singleCreateStr + singleUpdateStr
return res
}

func addTestPathToIngress(ing *extensions.Ingress) {
ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths = append(
ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths,
extensions.HTTPIngressPath{
Path: "/test",
Backend: ing.Spec.Rules[0].IngressRuleValue.HTTP.Paths[0].Backend,
})
}

func (f *IngressScaleFramework) createScaleTestServiceIngress(suffix string, enableTLS bool) (*v1.Service, *extensions.Ingress, error) {
svcCreated, err := f.Clientset.CoreV1().Services(f.Namespace).Create(generateScaleTestServiceSpec(suffix))
if err != nil {
return nil, nil, err
}
ingCreated, err := f.Clientset.ExtensionsV1beta1().Ingresses(f.Namespace).Create(generateScaleTestIngressSpec(suffix, enableTLS))
if err != nil {
return nil, nil, err
}
return svcCreated, ingCreated, nil
}

func generateScaleTestIngressSpec(suffix string, enableTLS bool) *extensions.Ingress {
ing := &extensions.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", scaleTestIngressNamePrefix, suffix),
},
Spec: extensions.IngressSpec{
TLS: []extensions.IngressTLS{
{SecretName: scaleTestSecretName},
},
Rules: []extensions.IngressRule{
{
Host: scaleTestHostname,
IngressRuleValue: extensions.IngressRuleValue{
HTTP: &extensions.HTTPIngressRuleValue{
Paths: []extensions.HTTPIngressPath{
{
Path: "/scale",
Backend: extensions.IngressBackend{
ServiceName: fmt.Sprintf("%s-%s", scaleTestBackendName, suffix),
ServicePort: intstr.IntOrString{
Type: intstr.Int,
IntVal: 80,
},
},
},
},
},
},
},
},
},
}
if enableTLS {
ing.Spec.TLS = []extensions.IngressTLS{
{SecretName: scaleTestSecretName},
}
}
return ing
}

func generateScaleTestServiceSpec(suffix string) *v1.Service {
return &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", scaleTestBackendName, suffix),
Labels: scaleTestLabels,
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Name: "http",
Protocol: v1.ProtocolTCP,
Port: 80,
TargetPort: intstr.FromInt(8080),
}},
Selector: scaleTestLabels,
Type: v1.ServiceTypeNodePort,
},
}
}

func generateScaleTestBackendDeploymentSpec(numReplicas int32) *extensions.Deployment {
return &extensions.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: scaleTestBackendName,
},
Spec: extensions.DeploymentSpec{
Replicas: &numReplicas,
Selector: &metav1.LabelSelector{MatchLabels: scaleTestLabels},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: scaleTestLabels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: scaleTestBackendName,
Image: "gcr.io/google_containers/echoserver:1.6",
Ports: []v1.ContainerPort{{ContainerPort: 8080}},
ReadinessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Port: intstr.FromInt(8080),
Path: "/healthz",
},
},
FailureThreshold: 10,
PeriodSeconds: 1,
SuccessThreshold: 1,
TimeoutSeconds: 1,
},
},
},
},
},
},
}
}
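prepareIngsFunc in the new ingress.go above fans out one goroutine per ingress and collects per-ingress latencies and errors through buffered channels sized to the batch. A stripped-down sketch of that pattern, with the create-and-wait-for-ingress step replaced by a placeholder work function (names below are illustrative, not from the diff):

// Sketch only: WaitGroup fan-out with buffered error/latency channels.
package main

import (
	"fmt"
	"sync"
	"time"
)

func runBatch(n int, work func(i int) error) ([]time.Duration, []error) {
	var wg sync.WaitGroup
	wg.Add(n)
	errCh := make(chan error, n)
	latCh := make(chan time.Duration, n)
	for i := 0; i < n; i++ {
		go func(i int) {
			defer wg.Done()
			start := time.Now()
			if err := work(i); err != nil {
				errCh <- err
				return
			}
			latCh <- time.Since(start)
		}(i)
	}
	wg.Wait()
	close(errCh)
	close(latCh)
	var lats []time.Duration
	for l := range latCh {
		lats = append(lats, l)
	}
	var errs []error
	for e := range errCh {
		errs = append(errs, e)
	}
	return lats, errs
}

func main() {
	lats, errs := runBatch(5, func(i int) error { time.Sleep(time.Millisecond); return nil })
	fmt.Println(len(lats), len(errs))
}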
38
vendor/k8s.io/kubernetes/test/e2e/network/scale/localrun/BUILD
generated
vendored
Normal file
@ -0,0 +1,38 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")

go_library(
name = "go_default_library",
srcs = ["ingress_scale.go"],
importpath = "k8s.io/kubernetes/test/e2e/network/scale/localrun",
visibility = ["//visibility:private"],
deps = [
"//pkg/cloudprovider/providers/gce:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/network/scale:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
],
)

go_binary(
name = "localrun",
embed = [":go_default_library"],
visibility = ["//visibility:public"],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
185
vendor/k8s.io/kubernetes/test/e2e/network/scale/localrun/ingress_scale.go
generated
vendored
Normal file
@ -0,0 +1,185 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
"flag"
"fmt"
"os"
"path/filepath"
"sort"
"strconv"

"github.com/golang/glog"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"

"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/network/scale"
)

var (
kubeconfig string
enableTLS bool
numIngressesTest numIngressesSlice
testNamespace string
cloudConfig framework.CloudConfig
outputFile string
cleanup bool
)

type numIngressesSlice []int

func (i *numIngressesSlice) String() string {
return fmt.Sprintf("%d", *i)
}

func (i *numIngressesSlice) Set(value string) error {
v, err := strconv.Atoi(value)
if err != nil {
return err
}
*i = append(*i, v)
sort.Ints(*i)
return nil
}

func registerFlags() {
if home := os.Getenv("HOME"); home != "" {
flag.StringVar(&kubeconfig, "kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) Absolute path to the kubeconfig file")
} else {
flag.StringVar(&kubeconfig, "kubeconfig", "", "Absolute path to the kubeconfig file")
}
flag.StringVar(&cloudConfig.ProjectID, "project", "", "GCE project being used")
flag.StringVar(&cloudConfig.Zone, "zone", "", "GCE zone being used")
flag.StringVar(&cloudConfig.Region, "region", "", "GCE region being used")
flag.Var(&numIngressesTest, "num-ingresses", "The number of ingresses to test, specify multiple times for step testing (e.g. 5 ingresses -> 20 ingresses -> 100 ingresses)")
flag.BoolVar(&enableTLS, "enable-tls", true, "Whether to enable TLS on ingress")
flag.StringVar(&testNamespace, "namespace", "ingress-test-scale", "Namespace for testing")
flag.StringVar(&outputFile, "output", "", "If specify, dump latencies to the specified file")
flag.BoolVar(&cleanup, "cleanup", true, "Whether to cleanup resources after test")
}

func verifyFlags() error {
if cloudConfig.ProjectID == "" || cloudConfig.Zone == "" || cloudConfig.Region == "" {
return fmt.Errorf("must set all of --project, --zone and --region")
}
return nil
}

func main() {
registerFlags()
flag.Parse()
if err := verifyFlags(); err != nil {
glog.Errorf("Failed to verify flags: %v", err)
os.Exit(1)
}

// Initializing a k8s client.
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
glog.Errorf("Failed to build kubeconfig: %v", err)
os.Exit(1)
}
cs, err := clientset.NewForConfig(config)
if err != nil {
glog.Errorf("Failed to create kubeclient: %v", err)
os.Exit(1)
}

// Initializing a GCE client.
gceAlphaFeatureGate, err := gcecloud.NewAlphaFeatureGate([]string{})
if err != nil {
glog.Errorf("Encountered error for creating alpha feature gate: %v", err)
os.Exit(1)
}
gceCloud, err := gcecloud.CreateGCECloud(&gcecloud.CloudConfig{
ProjectID: cloudConfig.ProjectID,
Region: cloudConfig.Region,
Zone: cloudConfig.Zone,
AlphaFeatureGate: gceAlphaFeatureGate,
})
if err != nil {
glog.Errorf("Error building GCE provider: %v", err)
os.Exit(1)
}
cloudConfig.Provider = gceCloud

testSuccessFlag := true
defer func() {
if !testSuccessFlag {
glog.Errorf("Ingress scale test failed.")
os.Exit(1)
}
}()

ns := &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testNamespace,
},
}
glog.Infof("Creating namespace %s...", ns.Name)
if _, err := cs.CoreV1().Namespaces().Create(ns); err != nil {
glog.Errorf("Failed to create namespace %s: %v", ns.Name, err)
testSuccessFlag = false
return
}
if cleanup {
defer func() {
glog.Infof("Deleting namespace %s...", ns.Name)
if err := cs.CoreV1().Namespaces().Delete(ns.Name, nil); err != nil {
glog.Errorf("Failed to delete namespace %s: %v", ns.Name, err)
testSuccessFlag = false
}
}()
}

// Setting up a localized scale test framework.
f := scale.NewIngressScaleFramework(cs, ns.Name, cloudConfig)
f.Logger = &framework.GLogger{}
// Customizing scale test.
f.EnableTLS = enableTLS
f.OutputFile = outputFile
if len(numIngressesTest) != 0 {
f.NumIngressesTest = numIngressesTest
}

// Real test begins.
if cleanup {
defer func() {
if errs := f.CleanupScaleTest(); len(errs) != 0 {
glog.Errorf("Failed to cleanup scale test: %v", errs)
testSuccessFlag = false
}
}()
}
err = f.PrepareScaleTest()
if err != nil {
glog.Errorf("Failed to prepare scale test: %v", err)
testSuccessFlag = false
return
}

if errs := f.RunScaleTest(); len(errs) != 0 {
glog.Errorf("Failed while running scale test: %v", errs)
testSuccessFlag = false
}
}
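The localrun binary above is a standalone driver for the scale package. Assuming a reachable kubeconfig and GCE credentials, an invocation would look roughly like: go run ingress_scale.go --project=my-project --region=us-central1 --zone=us-central1-b --num-ingresses=5 --num-ingresses=20 --output=latencies.txt. The flag names come from registerFlags above; the project, zone, region, and output file name are placeholders.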
54
vendor/k8s.io/kubernetes/test/e2e/network/service.go
generated
vendored
@ -25,6 +25,8 @@ import (
"strings"
"time"

compute "google.golang.org/api/compute/v1"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@ -572,16 +574,23 @@ var _ = SIGDescribe("Services", func() {
if framework.ProviderIs("gce", "gke") {
By("creating a static load balancer IP")
staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunId)
requestedIP, err = framework.CreateGCEStaticIP(staticIPName)
gceCloud, err := framework.GetGCECloud()
Expect(err).NotTo(HaveOccurred())

err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region())
defer func() {
if staticIPName != "" {
// Release GCE static IP - this is not kube-managed and will not be automatically released.
if err := framework.DeleteGCEStaticIP(staticIPName); err != nil {
if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
framework.Logf("failed to release static IP %s: %v", staticIPName, err)
}
}
}()
Expect(err).NotTo(HaveOccurred())
reservedAddr, err := gceCloud.GetRegionAddress(staticIPName, gceCloud.Region())
Expect(err).NotTo(HaveOccurred())

requestedIP = reservedAddr.Address
framework.Logf("Allocated static load balancer IP: %s", requestedIP)
}

@ -622,9 +631,11 @@ var _ = SIGDescribe("Services", func() {
// coming from, so this is first-aid rather than surgery).
By("demoting the static IP to ephemeral")
if staticIPName != "" {
gceCloud, err := framework.GetGCECloud()
Expect(err).NotTo(HaveOccurred())
// Deleting it after it is attached "demotes" it to an
// ephemeral IP, which can be auto-released.
if err := framework.DeleteGCEStaticIP(staticIPName); err != nil {
if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
framework.Failf("failed to release static IP %s: %v", staticIPName, err)
}
staticIPName = ""
@ -815,7 +826,7 @@ var _ = SIGDescribe("Services", func() {
tcpService := jig.CreateTCPServiceOrFail(ns, nil)
defer func() {
framework.Logf("Cleaning up the updating NodePorts test service")
err := cs.Core().Services(ns).Delete(serviceName, nil)
err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
Expect(err).NotTo(HaveOccurred())
}()
jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP)
@ -1265,7 +1276,7 @@ var _ = SIGDescribe("Services", func() {
}

By("Scaling down replication controller to zero")
framework.ScaleRC(f.ClientSet, f.InternalClientset, t.Namespace, rcSpec.Name, 0, false)
framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false)

By("Update service to not tolerate unready services")
_, err = framework.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) {
@ -1435,9 +1446,37 @@ var _ = SIGDescribe("Services", func() {
svc = jig.WaitForLoadBalancerOrFail(namespace, serviceName, createTimeout)
jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
lbIngress := &svc.Status.LoadBalancer.Ingress[0]
svcPort := int(svc.Spec.Ports[0].Port)
// should have an internal IP.
Expect(isInternalEndpoint(lbIngress)).To(BeTrue())

// ILBs are not accessible from the test orchestrator, so it's necessary to use
// a pod to test the service.
By("hitting the internal load balancer from pod")
framework.Logf("creating pod with host network")
hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec")

framework.Logf("Waiting up to %v for service %q's internal LB to respond to requests", createTimeout, serviceName)
tcpIngressIP := framework.GetIngressPoint(lbIngress)
if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
cmd := fmt.Sprintf(`curl -m 5 'http://%v:%v/echo?msg=hello'`, tcpIngressIP, svcPort)
stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
if err != nil {
framework.Logf("error curling; stdout: %v. err: %v", stdout, err)
return false, nil
}

if !strings.Contains(stdout, "hello") {
framework.Logf("Expected output to contain 'hello', got %q; retrying...", stdout)
return false, nil
}

framework.Logf("Successful curl; stdout: %v", stdout)
return true, nil
}); pollErr != nil {
framework.Failf("Failed to hit ILB IP, err: %v", pollErr)
}

By("switching to external type LoadBalancer")
svc = jig.UpdateServiceOrFail(namespace, serviceName, func(svc *v1.Service) {
disableILB(svc)
@ -1457,6 +1496,11 @@ var _ = SIGDescribe("Services", func() {
jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
Expect(isInternalEndpoint(lbIngress)).To(BeFalse())

By("hitting the external load balancer")
framework.Logf("Waiting up to %v for service %q's external LB to respond to requests", createTimeout, serviceName)
tcpIngressIP = framework.GetIngressPoint(lbIngress)
jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault)

// GCE cannot test a specific IP because the test may not own it. This cloud specific condition
// will be removed when GCP supports similar functionality.
if framework.ProviderIs("azure") {