vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD generated vendored Normal file

@@ -0,0 +1,78 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"loadbalancer.go",
"port_allocator.go",
"proxier.go",
"proxysocket.go",
"rlimit.go",
"roundrobin.go",
"udp_server.go",
] + select({
"@io_bazel_rules_go//go/platform:windows_amd64": [
"rlimit_windows.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/proxy/userspace",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/helper:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/util:go_default_library",
"//pkg/util/iptables:go_default_library",
"//pkg/util/slice:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"port_allocator_test.go",
"proxier_test.go",
"roundrobin_test.go",
],
importpath = "k8s.io/kubernetes/pkg/proxy/userspace",
library = ":go_default_library",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/util/iptables/testing:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/proxy/userspace/OWNERS generated vendored Executable file

@@ -0,0 +1,5 @@
reviewers:
- thockin
- lavalamp
- smarterclayton
- freehan

vendor/k8s.io/kubernetes/pkg/proxy/userspace/loadbalancer.go generated vendored Normal file

@@ -0,0 +1,34 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
"net"
)
// LoadBalancer is an interface for distributing incoming requests to service endpoints.
type LoadBalancer interface {
// NextEndpoint returns the endpoint to handle a request for the given
// service-port and source address.
NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error)
NewService(service proxy.ServicePortName, sessionAffinityType api.ServiceAffinity, stickyMaxAgeSeconds int) error
DeleteService(service proxy.ServicePortName)
CleanupStaleStickySessions(service proxy.ServicePortName)
ServiceHasEndpoints(service proxy.ServicePortName) bool
}
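As a hedged illustration (not part of the vendored file): a minimal LoadBalancer only has to satisfy the five methods above. The singleEndpointBalancer below is hypothetical; the real implementation is LoadBalancerRR in roundrobin.go further down.
// singleEndpointBalancer always returns one fixed "ip:port" endpoint.
// It exists purely to illustrate the interface contract.
type singleEndpointBalancer struct {
	endpoint string
}

func (s *singleEndpointBalancer) NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error) {
	return s.endpoint, nil
}
func (s *singleEndpointBalancer) NewService(service proxy.ServicePortName, affinityType api.ServiceAffinity, stickyMaxAgeSeconds int) error {
	return nil // no per-service state to manage
}
func (s *singleEndpointBalancer) DeleteService(service proxy.ServicePortName)              {}
func (s *singleEndpointBalancer) CleanupStaleStickySessions(service proxy.ServicePortName) {}
func (s *singleEndpointBalancer) ServiceHasEndpoints(service proxy.ServicePortName) bool {
	return s.endpoint != ""
}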

vendor/k8s.io/kubernetes/pkg/proxy/userspace/port_allocator.go generated vendored Normal file

@@ -0,0 +1,158 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
import (
"errors"
"math/big"
"math/rand"
"sync"
"time"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
)
var (
errPortRangeNoPortsRemaining = errors.New("port allocation failed; there are no remaining ports left to allocate in the accepted range")
)
type PortAllocator interface {
AllocateNext() (int, error)
Release(int)
}
// randomAllocator is a PortAllocator implementation that allocates random ports, yielding
// a port value of 0 (i.e. letting the OS pick an ephemeral port) for every call to AllocateNext().
type randomAllocator struct{}
// AllocateNext always returns 0
func (r *randomAllocator) AllocateNext() (int, error) {
return 0, nil
}
// Release is a noop
func (r *randomAllocator) Release(_ int) {
// noop
}
// newPortAllocator builds a PortAllocator for a given PortRange. If the PortRange is empty
// then a random port allocator is returned; otherwise, a new range-based allocator
// is returned.
func newPortAllocator(r net.PortRange) PortAllocator {
if r.Base == 0 {
return &randomAllocator{}
}
return newPortRangeAllocator(r, true)
}
const (
portsBufSize = 16
nextFreePortCooldown = 500 * time.Millisecond
allocateNextTimeout = 1 * time.Second
)
type rangeAllocator struct {
net.PortRange
ports chan int
used big.Int
lock sync.Mutex
rand *rand.Rand
}
func newPortRangeAllocator(r net.PortRange, autoFill bool) PortAllocator {
if r.Base == 0 || r.Size == 0 {
panic("illegal argument: may not specify an empty port range")
}
ra := &rangeAllocator{
PortRange: r,
ports: make(chan int, portsBufSize),
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
}
if autoFill {
go wait.Forever(func() { ra.fillPorts() }, nextFreePortCooldown)
}
return ra
}
// fillPorts loops, always searching for the next free port and, if found, filling the ports buffer with it.
// It blocks on the buffered ports channel while free ports remain and returns once the range is exhausted.
func (r *rangeAllocator) fillPorts() {
for {
if !r.fillPortsOnce() {
return
}
}
}
func (r *rangeAllocator) fillPortsOnce() bool {
port := r.nextFreePort()
if port == -1 {
return false
}
r.ports <- port
return true
}
// nextFreePort finds a free port, first picking a random port. If that port is already in use
// then the port range is scanned sequentially until either a port is found or the scan completes
// unsuccessfully. An unsuccessful scan returns a port of -1.
func (r *rangeAllocator) nextFreePort() int {
r.lock.Lock()
defer r.lock.Unlock()
// choose random port
j := r.rand.Intn(r.Size)
if b := r.used.Bit(j); b == 0 {
r.used.SetBit(&r.used, j, 1)
return j + r.Base
}
// search sequentially
for i := j + 1; i < r.Size; i++ {
if b := r.used.Bit(i); b == 0 {
r.used.SetBit(&r.used, i, 1)
return i + r.Base
}
}
for i := 0; i < j; i++ {
if b := r.used.Bit(i); b == 0 {
r.used.SetBit(&r.used, i, 1)
return i + r.Base
}
}
return -1
}
func (r *rangeAllocator) AllocateNext() (port int, err error) {
select {
case port = <-r.ports:
case <-time.After(allocateNextTimeout):
err = errPortRangeNoPortsRemaining
}
return
}
func (r *rangeAllocator) Release(port int) {
port -= r.Base
if port < 0 || port >= r.Size {
return
}
r.lock.Lock()
defer r.lock.Unlock()
r.used.SetBit(&r.used, port, 0)
}
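A brief usage sketch (illustrative only; examplePortAllocation is a hypothetical name): newPortAllocator returns the random allocator for an empty range and the channel-backed rangeAllocator otherwise, and AllocateNext waits up to allocateNextTimeout for the filler goroutine to produce a port.
// examplePortAllocation allocates one port from a NodePort-style range and
// releases it again.
func examplePortAllocation() error {
	var r net.PortRange
	if err := r.Set("30000-32767"); err != nil {
		return err
	}
	pa := newPortAllocator(r) // non-empty range: rangeAllocator with auto-fill
	port, err := pa.AllocateNext()
	if err != nil {
		// errPortRangeNoPortsRemaining once the range is exhausted
		return err
	}
	pa.Release(port) // clears the corresponding bit in the used bitmap
	return nil
}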

vendor/k8s.io/kubernetes/pkg/proxy/userspace/port_allocator_test.go generated vendored Normal file

@@ -0,0 +1,178 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
import (
"reflect"
"testing"
"k8s.io/apimachinery/pkg/util/net"
)
func TestRangeAllocatorEmpty(t *testing.T) {
r := &net.PortRange{}
r.Set("0-0")
defer func() {
if rv := recover(); rv == nil {
t.Fatalf("expected panic because of empty port range: %#v", r)
}
}()
_ = newPortRangeAllocator(*r, true)
}
func TestRangeAllocatorFullyAllocated(t *testing.T) {
r := &net.PortRange{}
r.Set("1-1")
// Don't auto-fill ports, we'll manually turn the crank
pra := newPortRangeAllocator(*r, false)
a := pra.(*rangeAllocator)
// Fill in the one available port
if !a.fillPortsOnce() {
t.Fatalf("Expected to be able to fill ports")
}
// There should be no ports available
if a.fillPortsOnce() {
t.Fatalf("Expected to be unable to fill ports")
}
p, err := a.AllocateNext()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if p != 1 {
t.Fatalf("unexpected allocated port: %d", p)
}
a.lock.Lock()
if bit := a.used.Bit(p - a.Base); bit != 1 {
a.lock.Unlock()
t.Fatalf("unexpected used bit for allocated port: %d", p)
}
a.lock.Unlock()
_, err = a.AllocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range")
}
a.Release(p)
a.lock.Lock()
if bit := a.used.Bit(p - a.Base); bit != 0 {
a.lock.Unlock()
t.Fatalf("unexpected used bit for allocated port: %d", p)
}
a.lock.Unlock()
// Fill in the one available port
if !a.fillPortsOnce() {
t.Fatalf("Expected to be able to fill ports")
}
p, err = a.AllocateNext()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if p != 1 {
t.Fatalf("unexpected allocated port: %d", p)
}
a.lock.Lock()
if bit := a.used.Bit(p - a.Base); bit != 1 {
a.lock.Unlock()
t.Fatalf("unexpected used bit for allocated port: %d", p)
}
a.lock.Unlock()
_, err = a.AllocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range")
}
}
func TestRangeAllocator_RandomishAllocation(t *testing.T) {
r := &net.PortRange{}
r.Set("1-100")
pra := newPortRangeAllocator(*r, false)
a := pra.(*rangeAllocator)
// allocate all the ports
var err error
ports := make([]int, 100, 100)
for i := 0; i < 100; i++ {
if !a.fillPortsOnce() {
t.Fatalf("Expected to be able to fill ports")
}
ports[i], err = a.AllocateNext()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if ports[i] < 1 || ports[i] > 100 {
t.Fatalf("unexpected allocated port: %d", ports[i])
}
a.lock.Lock()
if bit := a.used.Bit(ports[i] - a.Base); bit != 1 {
a.lock.Unlock()
t.Fatalf("unexpected used bit for allocated port: %d", ports[i])
}
a.lock.Unlock()
}
if a.fillPortsOnce() {
t.Fatalf("Expected to be unable to fill ports")
}
// release them all
for i := 0; i < 100; i++ {
a.Release(ports[i])
a.lock.Lock()
if bit := a.used.Bit(ports[i] - a.Base); bit != 0 {
a.lock.Unlock()
t.Fatalf("unexpected used bit for allocated port: %d", ports[i])
}
a.lock.Unlock()
}
// allocate the ports again
rports := make([]int, 100, 100)
for i := 0; i < 100; i++ {
if !a.fillPortsOnce() {
t.Fatalf("Expected to be able to fill ports")
}
rports[i], err = a.AllocateNext()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if rports[i] < 1 || rports[i] > 100 {
t.Fatalf("unexpected allocated port: %d", rports[i])
}
a.lock.Lock()
if bit := a.used.Bit(rports[i] - a.Base); bit != 1 {
a.lock.Unlock()
t.Fatalf("unexpected used bit for allocated port: %d", rports[i])
}
a.lock.Unlock()
}
if a.fillPortsOnce() {
t.Fatalf("Expected to be unable to fill ports")
}
if reflect.DeepEqual(ports, rports) {
t.Fatalf("expected re-allocated ports to be in a somewhat random order")
}
}

vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go generated vendored Normal file

File diff suppressed because it is too large

vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier_test.go generated vendored Normal file

@@ -0,0 +1,923 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
import (
"fmt"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"net/url"
"os"
"strconv"
"sync/atomic"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
ipttest "k8s.io/kubernetes/pkg/util/iptables/testing"
"k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing"
)
const (
udpIdleTimeoutForTest = 250 * time.Millisecond
)
func joinHostPort(host string, port int) string {
return net.JoinHostPort(host, fmt.Sprintf("%d", port))
}
func waitForClosedPortTCP(p *Proxier, proxyPort int) error {
for i := 0; i < 50; i++ {
conn, err := net.Dial("tcp", joinHostPort("", proxyPort))
if err != nil {
return nil
}
conn.Close()
time.Sleep(1 * time.Millisecond)
}
return fmt.Errorf("port %d still open", proxyPort)
}
func waitForClosedPortUDP(p *Proxier, proxyPort int) error {
for i := 0; i < 50; i++ {
conn, err := net.Dial("udp", joinHostPort("", proxyPort))
if err != nil {
return nil
}
conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
// To detect a closed UDP port, write and then read.
_, err = conn.Write([]byte("x"))
if err != nil {
if e, ok := err.(net.Error); ok && !e.Timeout() {
return nil
}
}
var buf [4]byte
_, err = conn.Read(buf[0:])
if err != nil {
if e, ok := err.(net.Error); ok && !e.Timeout() {
return nil
}
}
conn.Close()
time.Sleep(1 * time.Millisecond)
}
return fmt.Errorf("port %d still open", proxyPort)
}
var tcpServerPort int32
var udpServerPort int32
func TestMain(m *testing.M) {
// Don't handle panics
runtime.ReallyCrash = true
// TCP setup.
tcp := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(r.URL.Path[1:]))
}))
defer tcp.Close()
u, err := url.Parse(tcp.URL)
if err != nil {
panic(fmt.Sprintf("failed to parse: %v", err))
}
_, port, err := net.SplitHostPort(u.Host)
if err != nil {
panic(fmt.Sprintf("failed to parse: %v", err))
}
tcpServerPortValue, err := strconv.Atoi(port)
if err != nil {
panic(fmt.Sprintf("failed to atoi(%s): %v", port, err))
}
tcpServerPort = int32(tcpServerPortValue)
// UDP setup.
udp, err := newUDPEchoServer()
if err != nil {
panic(fmt.Sprintf("failed to make a UDP server: %v", err))
}
_, port, err = net.SplitHostPort(udp.LocalAddr().String())
if err != nil {
panic(fmt.Sprintf("failed to parse: %v", err))
}
udpServerPortValue, err := strconv.Atoi(port)
if err != nil {
panic(fmt.Sprintf("failed to atoi(%s): %v", port, err))
}
udpServerPort = int32(udpServerPortValue)
go udp.Loop()
ret := m.Run()
// It should be safe to call Close() multiple times.
tcp.Close()
os.Exit(ret)
}
func testEchoTCP(t *testing.T, address string, port int) {
path := "aaaaa"
res, err := http.Get("http://" + address + ":" + fmt.Sprintf("%d", port) + "/" + path)
if err != nil {
t.Fatalf("error connecting to server: %v", err)
}
defer res.Body.Close()
data, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Errorf("error reading data: %v %v", err, string(data))
}
if string(data) != path {
t.Errorf("expected: %s, got %s", path, string(data))
}
}
func testEchoUDP(t *testing.T, address string, port int) {
data := "abc123"
conn, err := net.Dial("udp", joinHostPort(address, port))
if err != nil {
t.Fatalf("error connecting to server: %v", err)
}
if _, err := conn.Write([]byte(data)); err != nil {
t.Fatalf("error sending to server: %v", err)
}
var resp [1024]byte
n, err := conn.Read(resp[0:])
if err != nil {
t.Errorf("error receiving data: %v", err)
}
if string(resp[0:n]) != data {
t.Errorf("expected: %s, got %s", data, string(resp[0:n]))
}
}
func waitForNumProxyLoops(t *testing.T, p *Proxier, want int32) {
var got int32
for i := 0; i < 600; i++ {
got = atomic.LoadInt32(&p.numProxyLoops)
if got == want {
return
}
time.Sleep(100 * time.Millisecond)
}
t.Errorf("expected %d ProxyLoops running, got %d", want, got)
}
func waitForNumProxyClients(t *testing.T, s *ServiceInfo, want int, timeout time.Duration) {
var got int
now := time.Now()
deadline := now.Add(timeout)
for time.Now().Before(deadline) {
s.ActiveClients.Mu.Lock()
got = len(s.ActiveClients.Clients)
s.ActiveClients.Mu.Unlock()
if got == want {
return
}
time.Sleep(500 * time.Millisecond)
}
t.Errorf("expected %d ProxyClients live, got %d", want, got)
}
func TestTCPProxy(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
}
func TestUDPProxy(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
}
func TestUDPProxyTimeout(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
waitForNumProxyLoops(t, p, 1)
testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
// When connecting to a UDP service endpoint, there should be a Conn for the proxy.
waitForNumProxyClients(t, svcInfo, 1, time.Second)
// If the conn has no activity for serviceInfo.timeout since the last Read/Write, it should be closed because of the timeout.
waitForNumProxyClients(t, svcInfo, 0, 2*time.Second)
}
func TestMultiPortProxy(t *testing.T) {
lb := NewLoadBalancerRR()
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-p"}, Port: "p"}
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-q"}, Port: "q"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Protocol: "TCP", Port: tcpServerPort}},
}},
})
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceQ.Name, Namespace: serviceQ.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "q", Protocol: "UDP", Port: udpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfoP, err := p.addServiceOnPort(serviceP, "TCP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoTCP(t, "127.0.0.1", svcInfoP.proxyPort)
waitForNumProxyLoops(t, p, 1)
svcInfoQ, err := p.addServiceOnPort(serviceQ, "UDP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoUDP(t, "127.0.0.1", svcInfoQ.proxyPort)
waitForNumProxyLoops(t, p, 2)
}
func TestMultiPortOnServiceAdd(t *testing.T) {
lb := NewLoadBalancerRR()
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "q"}
serviceX := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "x"}
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
p.OnServiceAdd(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: 80,
Protocol: "TCP",
}, {
Name: "q",
Port: 81,
Protocol: "UDP",
}}},
})
waitForNumProxyLoops(t, p, 2)
svcInfo, exists := p.getServiceInfo(serviceP)
if !exists {
t.Fatalf("can't find serviceInfo for %s", serviceP)
}
if svcInfo.portal.ip.String() != "1.2.3.4" || svcInfo.portal.port != 80 || svcInfo.protocol != "TCP" {
t.Errorf("unexpected serviceInfo for %s: %#v", serviceP, svcInfo)
}
svcInfo, exists = p.getServiceInfo(serviceQ)
if !exists {
t.Fatalf("can't find serviceInfo for %s", serviceQ)
}
if svcInfo.portal.ip.String() != "1.2.3.4" || svcInfo.portal.port != 81 || svcInfo.protocol != "UDP" {
t.Errorf("unexpected serviceInfo for %s: %#v", serviceQ, svcInfo)
}
svcInfo, exists = p.getServiceInfo(serviceX)
if exists {
t.Fatalf("found unwanted serviceInfo for %s: %#v", serviceX, svcInfo)
}
}
// Helper: Stops the proxy for the named service.
func stopProxyByName(proxier *Proxier, service proxy.ServicePortName) error {
info, found := proxier.getServiceInfo(service)
if !found {
return fmt.Errorf("unknown service: %s", service)
}
return proxier.stopProxy(service, info)
}
func TestTCPProxyStop(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
if !svcInfo.IsAlive() {
t.Fatalf("wrong value for IsAlive(): expected true")
}
conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort))
if err != nil {
t.Fatalf("error connecting to proxy: %v", err)
}
conn.Close()
waitForNumProxyLoops(t, p, 1)
stopProxyByName(p, service)
if svcInfo.IsAlive() {
t.Fatalf("wrong value for IsAlive(): expected false")
}
// Wait for the port to really close.
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
t.Fatalf(err.Error())
}
waitForNumProxyLoops(t, p, 0)
}
func TestUDPProxyStop(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
conn, err := net.Dial("udp", joinHostPort("", svcInfo.proxyPort))
if err != nil {
t.Fatalf("error connecting to proxy: %v", err)
}
conn.Close()
waitForNumProxyLoops(t, p, 1)
stopProxyByName(p, service)
// Wait for the port to really close.
if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
t.Fatalf(err.Error())
}
waitForNumProxyLoops(t, p, 0)
}
func TestTCPProxyUpdateDelete(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort))
if err != nil {
t.Fatalf("error connecting to proxy: %v", err)
}
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
}}},
})
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
t.Fatalf(err.Error())
}
waitForNumProxyLoops(t, p, 0)
}
func TestUDPProxyUpdateDelete(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
conn, err := net.Dial("udp", joinHostPort("", svcInfo.proxyPort))
if err != nil {
t.Fatalf("error connecting to proxy: %v", err)
}
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "UDP",
}}},
})
if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
t.Fatalf(err.Error())
}
waitForNumProxyLoops(t, p, 0)
}
func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
endpoint := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
}
lb.OnEndpointsAdd(endpoint)
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort))
if err != nil {
t.Fatalf("error connecting to proxy: %v", err)
}
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
}}},
})
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
t.Fatalf(err.Error())
}
waitForNumProxyLoops(t, p, 0)
// Need to re-add the endpoint here because it got cleaned up during the service delete.
lb.OnEndpointsAdd(endpoint)
p.OnServiceAdd(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
}}},
})
svcInfo, exists := p.getServiceInfo(service)
if !exists {
t.Fatalf("can't find serviceInfo for %s", service)
}
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
}
func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
endpoint := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
}
lb.OnEndpointsAdd(endpoint)
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
conn, err := net.Dial("udp", joinHostPort("", svcInfo.proxyPort))
if err != nil {
t.Fatalf("error connecting to proxy: %v", err)
}
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "UDP",
}}},
})
if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
t.Fatalf(err.Error())
}
waitForNumProxyLoops(t, p, 0)
// Need to re-add the endpoint here because it got cleaned up during the service delete.
lb.OnEndpointsAdd(endpoint)
p.OnServiceAdd(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "UDP",
}}},
})
svcInfo, exists := p.getServiceInfo(service)
if !exists {
t.Fatalf("can't find serviceInfo")
}
testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
}
func TestTCPProxyUpdatePort(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
p.OnServiceAdd(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: 99,
Protocol: "TCP",
}}},
})
// Wait for the socket to actually get free.
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
t.Fatalf(err.Error())
}
svcInfo, exists := p.getServiceInfo(service)
if !exists {
t.Fatalf("can't find serviceInfo")
}
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
// This is a bit async, but this should be sufficient.
time.Sleep(500 * time.Millisecond)
waitForNumProxyLoops(t, p, 1)
}
func TestUDPProxyUpdatePort(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
waitForNumProxyLoops(t, p, 1)
p.OnServiceAdd(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: 99,
Protocol: "UDP",
}}},
})
// Wait for the socket to actually get free.
if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
t.Fatalf(err.Error())
}
svcInfo, exists := p.getServiceInfo(service)
if !exists {
t.Fatalf("can't find serviceInfo")
}
testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
}
func TestProxyUpdatePublicIPs(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
p.OnServiceAdd(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.portal.port),
Protocol: "TCP",
}},
ClusterIP: svcInfo.portal.ip.String(),
ExternalIPs: []string{"4.3.2.1"},
},
})
// Wait for the socket to actually get free.
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
t.Fatalf(err.Error())
}
svcInfo, exists := p.getServiceInfo(service)
if !exists {
t.Fatalf("can't find serviceInfo")
}
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
// This is a bit async, but this should be sufficient.
time.Sleep(500 * time.Millisecond)
waitForNumProxyLoops(t, p, 1)
}
func TestProxyUpdatePortal(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
endpoint := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
}
lb.OnEndpointsAdd(endpoint)
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
svcv0 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
}}},
}
svcv1 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
}}},
}
p.OnServiceUpdate(svcv0, svcv1)
_, exists := p.getServiceInfo(service)
if exists {
t.Fatalf("service with empty ClusterIP should not be included in the proxy")
}
svcv2 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "None", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
}}},
}
p.OnServiceUpdate(svcv1, svcv2)
_, exists = p.getServiceInfo(service)
if exists {
t.Fatalf("service with 'None' as ClusterIP should not be included in the proxy")
}
svcv3 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
}}},
}
p.OnServiceUpdate(svcv2, svcv3)
lb.OnEndpointsAdd(endpoint)
svcInfo, exists = p.getServiceInfo(service)
if !exists {
t.Fatalf("service with ClusterIP set not found in the proxy")
}
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
}
func makeFakeExec() *fakeexec.FakeExec {
fcmd := fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil },
},
}
return &fakeexec.FakeExec{
CommandScript: []fakeexec.FakeCommandAction{
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
},
LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
}
}
// TODO(justinsb): Add test for nodePort conflict detection, once we have nodePort wired in
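A side note on the fakeexec scripting used above: each entry in CommandScript and CombinedOutputScript is consumed per invocation, so a test expecting several exec calls must script one action per call. A hedged sketch (makeFakeExecN is a hypothetical helper, not part of the file):
// makeFakeExecN scripts n identical conntrack-style invocations.
func makeFakeExecN(n int) *fakeexec.FakeExec {
	fcmd := fakeexec.FakeCmd{}
	fe := &fakeexec.FakeExec{
		LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
	}
	for i := 0; i < n; i++ {
		fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript,
			func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil })
		fe.CommandScript = append(fe.CommandScript,
			func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) })
	}
	return fe
}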

vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxysocket.go generated vendored Normal file

@@ -0,0 +1,302 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
import (
"fmt"
"io"
"net"
"strconv"
"strings"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/runtime"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
)
// ProxySocket is an abstraction over TCP/UDP sockets which are proxied.
type ProxySocket interface {
// Addr gets the net.Addr for a ProxySocket.
Addr() net.Addr
// Close stops the ProxySocket from accepting incoming connections.
// Each implementation should comment on the impact of calling Close
// while sessions are active.
Close() error
// ProxyLoop proxies incoming connections for the specified service to the service endpoints.
ProxyLoop(service proxy.ServicePortName, info *ServiceInfo, loadBalancer LoadBalancer)
// ListenPort returns the host port that the ProxySocket is listening on
ListenPort() int
}
func newProxySocket(protocol api.Protocol, ip net.IP, port int) (ProxySocket, error) {
host := ""
if ip != nil {
host = ip.String()
}
switch strings.ToUpper(string(protocol)) {
case "TCP":
listener, err := net.Listen("tcp", net.JoinHostPort(host, strconv.Itoa(port)))
if err != nil {
return nil, err
}
return &tcpProxySocket{Listener: listener, port: port}, nil
case "UDP":
addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(host, strconv.Itoa(port)))
if err != nil {
return nil, err
}
conn, err := net.ListenUDP("udp", addr)
if err != nil {
return nil, err
}
return &udpProxySocket{UDPConn: conn, port: port}, nil
}
return nil, fmt.Errorf("unknown protocol %q", protocol)
}
// EndpointDialTimeouts is the sequence of timeouts we try, in order, when dialing a backend endpoint.
var EndpointDialTimeouts = []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 2 * time.Second}
// tcpProxySocket implements ProxySocket. Close() is implemented by net.Listener. When Close() is called,
// no new connections are allowed but existing connections are left untouched.
type tcpProxySocket struct {
net.Listener
port int
}
func (tcp *tcpProxySocket) ListenPort() int {
return tcp.port
}
// TryConnectEndpoints attempts to connect to the next available endpoint for the given service, cycling
// through until it is able to successfully connect, or it has tried with all timeouts in EndpointDialTimeouts.
func TryConnectEndpoints(service proxy.ServicePortName, srcAddr net.Addr, protocol string, loadBalancer LoadBalancer) (out net.Conn, err error) {
sessionAffinityReset := false
for _, dialTimeout := range EndpointDialTimeouts {
endpoint, err := loadBalancer.NextEndpoint(service, srcAddr, sessionAffinityReset)
if err != nil {
glog.Errorf("Couldn't find an endpoint for %s: %v", service, err)
return nil, err
}
glog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint)
// TODO: This could spin up a new goroutine to make the outbound connection,
// and keep accepting inbound traffic.
outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout)
if err != nil {
if isTooManyFDsError(err) {
panic("Dial failed: " + err.Error())
}
glog.Errorf("Dial failed: %v", err)
sessionAffinityReset = true
continue
}
return outConn, nil
}
return nil, fmt.Errorf("failed to connect to an endpoint")
}
func (tcp *tcpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *ServiceInfo, loadBalancer LoadBalancer) {
for {
if !myInfo.IsAlive() {
// The service port was closed or replaced.
return
}
// Block until a connection is made.
inConn, err := tcp.Accept()
if err != nil {
if isTooManyFDsError(err) {
panic("Accept failed: " + err.Error())
}
if isClosedError(err) {
return
}
if !myInfo.IsAlive() {
// Then the service port was just closed so the accept failure is to be expected.
return
}
glog.Errorf("Accept failed: %v", err)
continue
}
glog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
outConn, err := TryConnectEndpoints(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", loadBalancer)
if err != nil {
glog.Errorf("Failed to connect to balancer: %v", err)
inConn.Close()
continue
}
// Spin up an async copy loop.
go ProxyTCP(inConn.(*net.TCPConn), outConn.(*net.TCPConn))
}
}
// ProxyTCP proxies data bi-directionally between in and out.
func ProxyTCP(in, out *net.TCPConn) {
var wg sync.WaitGroup
wg.Add(2)
glog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
go copyBytes("from backend", in, out, &wg)
go copyBytes("to backend", out, in, &wg)
wg.Wait()
}
func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) {
defer wg.Done()
glog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
n, err := io.Copy(dest, src)
if err != nil {
if !isClosedError(err) {
glog.Errorf("I/O error: %v", err)
}
}
glog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
dest.Close()
src.Close()
}
// udpProxySocket implements ProxySocket. Close() is implemented by net.UDPConn. When Close() is called,
// no new connections are allowed and existing connections are broken.
// TODO: We could lame-duck this ourselves, if it becomes important.
type udpProxySocket struct {
*net.UDPConn
port int
}
func (udp *udpProxySocket) ListenPort() int {
return udp.port
}
func (udp *udpProxySocket) Addr() net.Addr {
return udp.LocalAddr()
}
// ClientCache holds all the known UDP clients that have not timed out.
type ClientCache struct {
Mu sync.Mutex
Clients map[string]net.Conn // addr string -> connection
}
func newClientCache() *ClientCache {
return &ClientCache{Clients: map[string]net.Conn{}}
}
func (udp *udpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *ServiceInfo, loadBalancer LoadBalancer) {
var buffer [4096]byte // 4KiB should be enough for most whole packets
for {
if !myInfo.IsAlive() {
// The service port was closed or replaced.
break
}
// Block until data arrives.
// TODO: Accumulate a histogram of n or something, to fine tune the buffer size.
n, cliAddr, err := udp.ReadFrom(buffer[0:])
if err != nil {
if e, ok := err.(net.Error); ok {
if e.Temporary() {
glog.V(1).Infof("ReadFrom had a temporary failure: %v", err)
continue
}
}
glog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err)
break
}
// If this is a client we know already, reuse the connection and goroutine.
svrConn, err := udp.getBackendConn(myInfo.ActiveClients, cliAddr, loadBalancer, service, myInfo.Timeout)
if err != nil {
continue
}
// TODO: It would be nice to let the goroutine handle this write, but we don't
// really want to copy the buffer. We could do a pool of buffers or something.
_, err = svrConn.Write(buffer[0:n])
if err != nil {
if !logTimeout(err) {
glog.Errorf("Write failed: %v", err)
// TODO: Maybe tear down the goroutine for this client/server pair?
}
continue
}
err = svrConn.SetDeadline(time.Now().Add(myInfo.Timeout))
if err != nil {
glog.Errorf("SetDeadline failed: %v", err)
continue
}
}
}
func (udp *udpProxySocket) getBackendConn(activeClients *ClientCache, cliAddr net.Addr, loadBalancer LoadBalancer, service proxy.ServicePortName, timeout time.Duration) (net.Conn, error) {
activeClients.Mu.Lock()
defer activeClients.Mu.Unlock()
svrConn, found := activeClients.Clients[cliAddr.String()]
if !found {
// TODO: This could spin up a new goroutine to make the outbound connection,
// and keep accepting inbound traffic.
glog.V(3).Infof("New UDP connection from %s", cliAddr)
var err error
svrConn, err = TryConnectEndpoints(service, cliAddr, "udp", loadBalancer)
if err != nil {
return nil, err
}
if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil {
glog.Errorf("SetDeadline failed: %v", err)
return nil, err
}
activeClients.Clients[cliAddr.String()] = svrConn
go func(cliAddr net.Addr, svrConn net.Conn, activeClients *ClientCache, timeout time.Duration) {
defer runtime.HandleCrash()
udp.proxyClient(cliAddr, svrConn, activeClients, timeout)
}(cliAddr, svrConn, activeClients, timeout)
}
return svrConn, nil
}
// This function is expected to be called as a goroutine.
// TODO: Track and log bytes copied, like TCP
func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activeClients *ClientCache, timeout time.Duration) {
defer svrConn.Close()
var buffer [4096]byte
for {
n, err := svrConn.Read(buffer[0:])
if err != nil {
if !logTimeout(err) {
glog.Errorf("Read failed: %v", err)
}
break
}
err = svrConn.SetDeadline(time.Now().Add(timeout))
if err != nil {
glog.Errorf("SetDeadline failed: %v", err)
break
}
n, err = udp.WriteTo(buffer[0:n], cliAddr)
if err != nil {
if !logTimeout(err) {
glog.Errorf("WriteTo failed: %v", err)
}
break
}
}
activeClients.Mu.Lock()
delete(activeClients.Clients, cliAddr.String())
activeClients.Mu.Unlock()
}
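To tie the pieces together, a hedged sketch of constructing and running a socket by hand (exampleProxySocket is a hypothetical name; in the real code the Proxier creates sockets via addServiceOnPort and owns their lifecycle):
// exampleProxySocket listens on an ephemeral local TCP port and proxies
// connections for svcName until info is marked dead.
func exampleProxySocket(svcName proxy.ServicePortName, info *ServiceInfo, lb LoadBalancer) error {
	sock, err := newProxySocket(api.ProtocolTCP, net.ParseIP("127.0.0.1"), 0) // port 0: the kernel picks
	if err != nil {
		return err
	}
	glog.V(3).Infof("proxying %v on %v", svcName, sock.Addr()) // Addr reports the actual bound address
	go sock.ProxyLoop(svcName, info, lb)
	return nil
}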

vendor/k8s.io/kubernetes/pkg/proxy/userspace/rlimit.go generated vendored Normal file

@@ -0,0 +1,25 @@
// +build !windows
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
import "golang.org/x/sys/unix"
func setRLimit(limit uint64) error {
return unix.Setrlimit(unix.RLIMIT_NOFILE, &unix.Rlimit{Max: limit, Cur: limit})
}
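For context, a hedged sketch of the intended call site (an assumption: proxier.go's diff is suppressed above, so the real call and limit value are not visible here):
// raiseFDLimit is a hypothetical helper; the 64k value is a placeholder.
// Each proxied connection consumes file descriptors, so the proxier wants
// a limit well above the default.
func raiseFDLimit() error {
	return setRLimit(64 * 1000)
}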

vendor/k8s.io/kubernetes/pkg/proxy/userspace/rlimit_windows.go generated vendored Normal file

@@ -0,0 +1,23 @@
// +build windows
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
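// setRLimit is a no-op on Windows, which has no direct RLIMIT_NOFILE equivalent.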
func setRLimit(limit uint64) error {
return nil
}

vendor/k8s.io/kubernetes/pkg/proxy/userspace/roundrobin.go generated vendored Normal file

@@ -0,0 +1,386 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
import (
"errors"
"fmt"
"net"
"reflect"
"strconv"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/util/slice"
)
var (
ErrMissingServiceEntry = errors.New("missing service entry")
ErrMissingEndpoints = errors.New("missing endpoints")
)
type affinityState struct {
clientIP string
//clientProtocol api.Protocol //not yet used
//sessionCookie string //not yet used
endpoint string
lastUsed time.Time
}
type affinityPolicy struct {
affinityType api.ServiceAffinity
affinityMap map[string]*affinityState // map client IP -> affinity info
ttlSeconds int
}
// LoadBalancerRR is a round-robin load balancer.
type LoadBalancerRR struct {
lock sync.RWMutex
services map[proxy.ServicePortName]*balancerState
}
// Ensure this implements LoadBalancer.
var _ LoadBalancer = &LoadBalancerRR{}
type balancerState struct {
endpoints []string // a list of "ip:port" style strings
index int // current index into endpoints
affinity affinityPolicy
}
func newAffinityPolicy(affinityType api.ServiceAffinity, ttlSeconds int) *affinityPolicy {
return &affinityPolicy{
affinityType: affinityType,
affinityMap: make(map[string]*affinityState),
ttlSeconds: ttlSeconds,
}
}
// NewLoadBalancerRR returns a new LoadBalancerRR.
func NewLoadBalancerRR() *LoadBalancerRR {
return &LoadBalancerRR{
services: map[proxy.ServicePortName]*balancerState{},
}
}
func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlSeconds int) error {
glog.V(4).Infof("LoadBalancerRR NewService %q", svcPort)
lb.lock.Lock()
defer lb.lock.Unlock()
lb.newServiceInternal(svcPort, affinityType, ttlSeconds)
return nil
}
// This assumes that lb.lock is already held.
func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlSeconds int) *balancerState {
if ttlSeconds == 0 {
ttlSeconds = int(api.DefaultClientIPServiceAffinitySeconds) // Default to 3 hours if not specified. Should 0 be unlimited instead?
}
if _, exists := lb.services[svcPort]; !exists {
lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlSeconds)}
glog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort)
} else if affinityType != "" {
lb.services[svcPort].affinity.affinityType = affinityType
}
return lb.services[svcPort]
}
func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
glog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort)
lb.lock.Lock()
defer lb.lock.Unlock()
delete(lb.services, svcPort)
}
// isSessionAffinity returns true if this service is using some form of session affinity.
func isSessionAffinity(affinity *affinityPolicy) bool {
// Should never be empty string, but checking for it to be safe.
if affinity.affinityType == "" || affinity.affinityType == api.ServiceAffinityNone {
return false
}
return true
}
// ServiceHasEndpoints checks whether a service entry has endpoints.
func (lb *LoadBalancerRR) ServiceHasEndpoints(svcPort proxy.ServicePortName) bool {
lb.lock.Lock()
defer lb.lock.Unlock()
state, exists := lb.services[svcPort]
// TODO: while nothing ever assigns nil to the map, *some* of the code using the map
// checks for it. The code should all follow the same convention.
return exists && state != nil && len(state.endpoints) > 0
}
// NextEndpoint returns a service endpoint.
// The service endpoint is chosen using the round-robin algorithm.
func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error) {
// Coarse locking is simple. We can get more fine-grained if/when we
// can prove it matters.
lb.lock.Lock()
defer lb.lock.Unlock()
state, exists := lb.services[svcPort]
if !exists || state == nil {
return "", ErrMissingServiceEntry
}
if len(state.endpoints) == 0 {
return "", ErrMissingEndpoints
}
glog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints)
sessionAffinityEnabled := isSessionAffinity(&state.affinity)
var ipaddr string
if sessionAffinityEnabled {
// Caution: don't shadow ipaddr
var err error
ipaddr, _, err = net.SplitHostPort(srcAddr.String())
if err != nil {
return "", fmt.Errorf("malformed source address %q: %v", srcAddr.String(), err)
}
if !sessionAffinityReset {
sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
if exists && int(time.Now().Sub(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
// Affinity wins.
endpoint := sessionAffinity.endpoint
sessionAffinity.lastUsed = time.Now()
glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint)
return endpoint, nil
}
}
}
// Take the next endpoint.
endpoint := state.endpoints[state.index]
state.index = (state.index + 1) % len(state.endpoints)
if sessionAffinityEnabled {
affinity := state.affinity.affinityMap[ipaddr]
if affinity == nil {
affinity = new(affinityState) //&affinityState{ipaddr, "TCP", "", endpoint, time.Now()}
state.affinity.affinityMap[ipaddr] = affinity
}
affinity.lastUsed = time.Now()
affinity.endpoint = endpoint
affinity.clientIP = ipaddr
glog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr])
}
return endpoint, nil
}
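// Illustrative sketch (editorial, not part of the vendored source): assuming
// lb is a *LoadBalancerRR whose entry for svcPort holds the endpoints
// ["a:1", "b:2", "c:3"] and affinity is disabled, successive calls walk the
// slice round-robin:
//
//	ep, _ := lb.NextEndpoint(svcPort, nil, false) // "a:1"
//	ep, _ = lb.NextEndpoint(svcPort, nil, false)  // "b:2"
//	ep, _ = lb.NextEndpoint(svcPort, nil, false)  // "c:3"
//	ep, _ = lb.NextEndpoint(svcPort, nil, false)  // wraps to "a:1"
//
// With ClientIP affinity enabled, the first pick for a given source IP is
// recorded in affinityMap and returned on subsequent calls until the TTL
// lapses or sessionAffinityReset is true.
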
type hostPortPair struct {
host string
port int
}
func isValidEndpoint(hpp *hostPortPair) bool {
return hpp.host != "" && hpp.port > 0
}
func flattenValidEndpoints(endpoints []hostPortPair) []string {
// Convert Endpoint objects into strings for easier use later. Ignore
// the protocol field - we'll get that from the Service objects.
var result []string
for i := range endpoints {
hpp := &endpoints[i]
if isValidEndpoint(hpp) {
result = append(result, net.JoinHostPort(hpp.host, strconv.Itoa(hpp.port)))
}
}
return result
}
// removeSessionAffinityByEndpoint removes any session affinity records associated with a particular endpoint (for example, when a pod goes down).
func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) {
for _, affinity := range state.affinity.affinityMap {
if affinity.endpoint == endpoint {
glog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort)
delete(state.affinity.affinityMap, affinity.clientIP)
}
}
}
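// Note (editorial): deleting entries from a map while ranging over it is
// well-defined in Go; removed keys are simply not produced later in the
// iteration, so the delete inside the loop above is safe.
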
// updateAffinityMap compares the new endpoints against those currently
// associated with the load balancer, and removes session affinity records
// for any endpoint that is not present in both lists.
// This assumes that lb.lock is already held.
func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEndpoints []string) {
allEndpoints := map[string]int{}
for _, newEndpoint := range newEndpoints {
allEndpoints[newEndpoint] = 1
}
state, exists := lb.services[svcPort]
if !exists {
return
}
for _, existingEndpoint := range state.endpoints {
allEndpoints[existingEndpoint]++
}
for mKey, mVal := range allEndpoints {
if mVal == 1 {
glog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort)
removeSessionAffinityByEndpoint(state, svcPort, mKey)
}
}
}
// buildPortsToEndpointsMap builds a map of portname -> all ip:ports for that
// portname. Explode Endpoints.Subsets[*] into this structure.
func buildPortsToEndpointsMap(endpoints *api.Endpoints) map[string][]hostPortPair {
portsToEndpoints := map[string][]hostPortPair{}
for i := range endpoints.Subsets {
ss := &endpoints.Subsets[i]
for j := range ss.Ports {
port := &ss.Ports[j]
for k := range ss.Addresses {
addr := &ss.Addresses[k]
portsToEndpoints[port.Name] = append(portsToEndpoints[port.Name], hostPortPair{addr.IP, int(port.Port)})
// Ignore the protocol field - we'll get that from the Service objects.
}
}
}
return portsToEndpoints
}
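// Worked example (editorial, not part of the vendored source): a subset with
// Addresses {10.0.0.1, 10.0.0.2} and Ports {p:80, q:443} expands to
//	"p" -> [{10.0.0.1 80}, {10.0.0.2 80}]
//	"q" -> [{10.0.0.1 443}, {10.0.0.2 443}]
// i.e. every (port, address) pair in each subset contributes one hostPortPair.
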
func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *api.Endpoints) {
portsToEndpoints := buildPortsToEndpointsMap(endpoints)
lb.lock.Lock()
defer lb.lock.Unlock()
for portname := range portsToEndpoints {
svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
newEndpoints := flattenValidEndpoints(portsToEndpoints[portname])
state, exists := lb.services[svcPort]
if !exists || state == nil || len(newEndpoints) > 0 {
glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
lb.updateAffinityMap(svcPort, newEndpoints)
// OnEndpointsAdd can be called without NewService being called externally.
// To be safe we will call it here. A new service will only be created
// if one does not already exist. The affinity will be updated
// later, once NewService is called.
state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
state.endpoints = slice.ShuffleStrings(newEndpoints)
// Reset the round-robin index.
state.index = 0
}
}
}
func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
portsToEndpoints := buildPortsToEndpointsMap(endpoints)
oldPortsToEndpoints := buildPortsToEndpointsMap(oldEndpoints)
registeredEndpoints := make(map[proxy.ServicePortName]bool)
lb.lock.Lock()
defer lb.lock.Unlock()
for portname := range portsToEndpoints {
svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
newEndpoints := flattenValidEndpoints(portsToEndpoints[portname])
state, exists := lb.services[svcPort]
curEndpoints := []string{}
if state != nil {
curEndpoints = state.endpoints
}
if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints) {
glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
lb.updateAffinityMap(svcPort, newEndpoints)
// OnEndpointsUpdate can be called without NewService being called externally.
// To be safe we will call it here. A new service will only be created
// if one does not already exist. The affinity will be updated
// later, once NewService is called.
state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
state.endpoints = slice.ShuffleStrings(newEndpoints)
// Reset the round-robin index.
state.index = 0
}
registeredEndpoints[svcPort] = true
}
// Now remove all endpoints missing from the update.
for portname := range oldPortsToEndpoints {
svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: oldEndpoints.Namespace, Name: oldEndpoints.Name}, Port: portname}
if _, exists := registeredEndpoints[svcPort]; !exists {
lb.resetService(svcPort)
}
}
}
func (lb *LoadBalancerRR) resetService(svcPort proxy.ServicePortName) {
// If the service is still around, reset but don't delete.
if state, ok := lb.services[svcPort]; ok {
if len(state.endpoints) > 0 {
glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
state.endpoints = []string{}
}
state.index = 0
state.affinity.affinityMap = map[string]*affinityState{}
}
}
func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *api.Endpoints) {
portsToEndpoints := buildPortsToEndpointsMap(endpoints)
lb.lock.Lock()
defer lb.lock.Unlock()
for portname := range portsToEndpoints {
svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
lb.resetService(svcPort)
}
}
func (lb *LoadBalancerRR) OnEndpointsSynced() {
}
// slicesEquiv tests whether two slices are equivalent. This sorts both slices in-place.
func slicesEquiv(lhs, rhs []string) bool {
if len(lhs) != len(rhs) {
return false
}
return reflect.DeepEqual(slice.SortStrings(lhs), slice.SortStrings(rhs))
}
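// Example (editorial): slicesEquiv([]string{"b", "a"}, []string{"a", "b"})
// returns true. Both arguments are left sorted afterwards, which is why
// OnEndpointsUpdate passes a copy of the current endpoints via
// slice.CopyStrings before comparing.
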
func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortName) {
lb.lock.Lock()
defer lb.lock.Unlock()
state, exists := lb.services[svcPort]
if !exists {
return
}
for ip, affinity := range state.affinity.affinityMap {
if int(time.Now().Sub(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
delete(state.affinity.affinityMap, ip)
}
}
}
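/*
Editorial usage sketch (not part of the vendored source); the service name
"default/svc:http" and the 10 second TTL are made-up values:

	lb := NewLoadBalancerRR()
	svc := proxy.ServicePortName{
		NamespacedName: types.NamespacedName{Namespace: "default", Name: "svc"},
		Port:           "http",
	}
	lb.NewService(svc, api.ServiceAffinityClientIP, 10)
	// Feed endpoint state with OnEndpointsAdd / OnEndpointsUpdate, then pick
	// a backend per connection:
	// ep, err := lb.NextEndpoint(svc, clientAddr, false)
*/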

717
vendor/k8s.io/kubernetes/pkg/proxy/userspace/roundrobin_test.go generated vendored Normal file
View File

@ -0,0 +1,717 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
import (
"net"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
)
func TestValidateWorks(t *testing.T) {
if isValidEndpoint(&hostPortPair{}) {
t.Errorf("Didn't fail for empty set")
}
if isValidEndpoint(&hostPortPair{host: "foobar"}) {
t.Errorf("Didn't fail with invalid port")
}
if isValidEndpoint(&hostPortPair{host: "foobar", port: -1}) {
t.Errorf("Didn't fail with a negative port")
}
if !isValidEndpoint(&hostPortPair{host: "foobar", port: 8080}) {
t.Errorf("Failed a valid config.")
}
}
func TestFilterWorks(t *testing.T) {
endpoints := []hostPortPair{
{host: "foobar", port: 1},
{host: "foobar", port: 2},
{host: "foobar", port: -1},
{host: "foobar", port: 3},
{host: "foobar", port: -2},
}
filtered := flattenValidEndpoints(endpoints)
if len(filtered) != 3 {
t.Errorf("Failed to filter to the correct size")
}
if filtered[0] != "foobar:1" {
t.Errorf("Index zero is not foobar:1")
}
if filtered[1] != "foobar:2" {
t.Errorf("Index one is not foobar:2")
}
if filtered[2] != "foobar:3" {
t.Errorf("Index two is not foobar:3")
}
}
func TestLoadBalanceFailsWithNoEndpoints(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "does-not-exist"}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil {
t.Errorf("Didn't fail with non-existent service")
}
if len(endpoint) != 0 {
t.Errorf("Got an endpoint")
}
}
func expectEndpoint(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
endpoint, err := loadBalancer.NextEndpoint(service, netaddr, false)
if err != nil {
t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
}
if endpoint != expected {
t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
}
}
func expectEndpointWithSessionAffinityReset(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
endpoint, err := loadBalancer.NextEndpoint(service, netaddr, true)
if err != nil {
t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
}
if endpoint != expected {
t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
}
}
func TestLoadBalanceWorksWithSingleEndpoint(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
Ports: []api.EndpointPort{{Name: "p", Port: 40}},
}},
}
loadBalancer.OnEndpointsAdd(endpoints)
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
}
func stringsInSlice(haystack []string, needles ...string) bool {
for _, needle := range needles {
found := false
for i := range haystack {
if haystack[i] == needle {
found = true
break
}
}
if !found {
return false
}
}
return true
}
func TestLoadBalanceWorksWithMultipleEndpoints(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}},
}},
}
loadBalancer.OnEndpointsAdd(endpoints)
shuffledEndpoints := loadBalancer.services[service].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint:1", "endpoint:2", "endpoint:3") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], nil)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil)
}
func TestLoadBalanceWorksWithMultipleEndpointsMultiplePorts(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}},
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
Ports: []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}},
},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
shuffledEndpoints := loadBalancer.services[serviceP].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:1", "endpoint3:3") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint1:2", "endpoint2:2", "endpoint3:4") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
}
func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpointsv1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint2"}},
Ports: []api.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
Ports: []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}},
},
},
}
loadBalancer.OnEndpointsAdd(endpointsv1)
shuffledEndpoints := loadBalancer.services[serviceP].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:2", "endpoint3:3") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint1:10", "endpoint2:20", "endpoint3:30") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
// Then update the configuration with one fewer endpoints, make sure
// we start in the beginning again
endpointsv2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint4"}},
Ports: []api.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint5"}},
Ports: []api.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}},
},
},
}
loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)
shuffledEndpoints = loadBalancer.services[serviceP].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint4:4", "endpoint5:5") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint4:40", "endpoint5:50") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
// Clear endpoints
endpointsv3 := &api.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil}
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
endpoint, err = loadBalancer.NextEndpoint(serviceP, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
}
func TestLoadBalanceWorksWithServiceRemoval(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
fooServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
barServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: "p"}
endpoint, err := loadBalancer.NextEndpoint(fooServiceP, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: fooServiceP.Name, Namespace: fooServiceP.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}},
Ports: []api.EndpointPort{{Name: "p", Port: 123}},
},
},
}
endpoints2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: barServiceP.Name, Namespace: barServiceP.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}},
Ports: []api.EndpointPort{{Name: "p", Port: 456}},
},
},
}
loadBalancer.OnEndpointsAdd(endpoints1)
loadBalancer.OnEndpointsAdd(endpoints2)
shuffledFooEndpoints := loadBalancer.services[fooServiceP].endpoints
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil)
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil)
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[2], nil)
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil)
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil)
shuffledBarEndpoints := loadBalancer.services[barServiceP].endpoints
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
// Then update the configuration by removing foo
loadBalancer.OnEndpointsDelete(endpoints1)
endpoint, err = loadBalancer.NextEndpoint(fooServiceP, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
// but bar is still there, and we continue RR from where we left off.
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
}
func TestStickyLoadBalanceWorksWithNewServiceCalledFirst(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
// Call NewService() before OnEndpointsAdd()
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
ep1, err := loadBalancer.NextEndpoint(service, client1, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep1, client1)
ep2, err := loadBalancer.NextEndpoint(service, client2, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep2, client2)
ep3, err := loadBalancer.NextEndpoint(service, client3, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep3, client3)
}
func TestStickyLoadBalanceWorksWithNewServiceCalledSecond(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
// Call OnEndpointsAdd() before NewService()
endpoints := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
ep1, err := loadBalancer.NextEndpoint(service, client1, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep1, client1)
ep2, err := loadBalancer.NextEndpoint(service, client2, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep2, client2)
ep3, err := loadBalancer.NextEndpoint(service, client3, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep3, client3)
}
func TestStickyLoadBalanceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
client4 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 4), Port: 0}
client5 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 5), Port: 0}
client6 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 6), Port: 0}
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpointsv1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
},
},
}
loadBalancer.OnEndpointsAdd(endpointsv1)
shuffledEndpoints := loadBalancer.services[service].endpoints
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
client1Endpoint := shuffledEndpoints[0]
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
client2Endpoint := shuffledEndpoints[1]
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
client3Endpoint := shuffledEndpoints[2]
endpointsv2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}},
},
},
}
loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)
shuffledEndpoints = loadBalancer.services[service].endpoints
if client1Endpoint == "endpoint:3" {
client1Endpoint = shuffledEndpoints[0]
} else if client2Endpoint == "endpoint:3" {
client2Endpoint = shuffledEndpoints[0]
} else if client3Endpoint == "endpoint:3" {
client3Endpoint = shuffledEndpoints[0]
}
expectEndpoint(t, loadBalancer, service, client1Endpoint, client1)
expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
endpointsv3 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}},
},
},
}
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
shuffledEndpoints = loadBalancer.services[service].endpoints
expectEndpoint(t, loadBalancer, service, client1Endpoint, client1)
expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client4)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client5)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client6)
}
func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpointsv1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
},
},
}
loadBalancer.OnEndpointsAdd(endpointsv1)
shuffledEndpoints := loadBalancer.services[service].endpoints
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
// Then update the configuration with one fewer endpoints, make sure
// we start in the beginning again
endpointsv2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 4}, {Port: 5}},
},
},
}
loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)
shuffledEndpoints = loadBalancer.services[service].endpoints
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
// Clear endpoints
endpointsv3 := &api.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil}
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
endpoint, err = loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
}
func TestStickyLoadBalanceWorksWithServiceRemoval(t *testing.T) {
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
loadBalancer := NewLoadBalancerRR()
fooService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
endpoint, err := loadBalancer.NextEndpoint(fooService, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
loadBalancer.NewService(fooService, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: fooService.Name, Namespace: fooService.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
},
},
}
barService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: ""}
loadBalancer.NewService(barService, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: barService.Name, Namespace: barService.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 4}, {Port: 5}},
},
},
}
loadBalancer.OnEndpointsAdd(endpoints1)
loadBalancer.OnEndpointsAdd(endpoints2)
shuffledFooEndpoints := loadBalancer.services[fooService].endpoints
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
shuffledBarEndpoints := loadBalancer.services[barService].endpoints
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
// Then update the configuration by removing foo
loadBalancer.OnEndpointsDelete(endpoints1)
endpoint, err = loadBalancer.NextEndpoint(fooService, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
// but bar is still there, and we continue RR from where we left off.
shuffledBarEndpoints = loadBalancer.services[barService].endpoints
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
}
func TestStickyLoadBalanceWorksWithEndpointFails(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
// Call NewService() before OnEndpointsAdd()
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
ep1, err := loadBalancer.NextEndpoint(service, client1, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
ep2, err := loadBalancer.NextEndpoint(service, client2, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
ep3, err := loadBalancer.NextEndpoint(service, client3, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client1)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client1)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
}

54
vendor/k8s.io/kubernetes/pkg/proxy/userspace/udp_server.go generated vendored Normal file
View File

@ -0,0 +1,54 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
import (
"fmt"
"net"
)
// udpEchoServer is a simple UDP echo server, intended for testing the proxy.
type udpEchoServer struct {
net.PacketConn
}
func (r *udpEchoServer) Loop() {
var buffer [4096]byte
for {
n, cliAddr, err := r.ReadFrom(buffer[0:])
if err != nil {
fmt.Printf("ReadFrom failed: %v\n", err)
continue
}
r.WriteTo(buffer[0:n], cliAddr)
}
}
func newUDPEchoServer() (*udpEchoServer, error) {
packetconn, err := net.ListenPacket("udp", ":0")
if err != nil {
return nil, err
}
return &udpEchoServer{packetconn}, nil
}
/*
func main() {
r,_ := newUDPEchoServer()
r.Loop()
}
*/
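/*
Editorial client sketch for the echo server above (assumes both halves run in
the same process; not part of the vendored source):

	srv, err := newUDPEchoServer()
	if err != nil {
		panic(err)
	}
	go srv.Loop()
	conn, err := net.Dial("udp", srv.LocalAddr().String())
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	conn.Write([]byte("ping"))
	buf := make([]byte, 4)
	conn.Read(buf) // buf now holds "ping", echoed back by Loop()
*/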