Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions
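For context: a vendor refresh of this size is normally produced by re-running the dep tool against the project's manifest rather than by hand-editing vendored files. A minimal sketch of that workflow, assuming a standard Gopkg.toml/Gopkg.lock setup (the exact commands and flags used for this commit are not recorded here):

    # Assumed workflow -- not recorded in the commit itself
    rm -rf vendor        # start from a clean vendor tree
    dep ensure           # repopulate vendor/ to match Gopkg.toml and Gopkg.lock
    # or, to first move dependencies to the newest versions the manifest allows:
    dep ensure -update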


@@ -13,60 +13,26 @@ go_library(
"port_allocator.go",
"proxier.go",
"proxysocket.go",
"rlimit.go",
"rlimit_windows.go",
"roundrobin.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"rlimit_windows.go",
],
"//conditions:default": [],
}),
],
importpath = "k8s.io/kubernetes/pkg/proxy/userspace",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/helper:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/util:go_default_library",
"//pkg/util/conntrack:go_default_library",
"//pkg/util/iptables:go_default_library",
"//pkg/util/slice:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:android": [
@@ -112,13 +78,13 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/util/iptables/testing:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],


@@ -17,7 +17,7 @@ limitations under the License.
package userspace
import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/proxy"
"net"
)
@@ -27,7 +27,7 @@ type LoadBalancer interface {
// NextEndpoint returns the endpoint to handle a request for the given
// service-port and source address.
NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error)
NewService(service proxy.ServicePortName, sessionAffinityType api.ServiceAffinity, stickyMaxAgeSeconds int) error
NewService(service proxy.ServicePortName, sessionAffinityType v1.ServiceAffinity, stickyMaxAgeSeconds int) error
DeleteService(service proxy.ServicePortName)
CleanupStaleStickySessions(service proxy.ServicePortName)
ServiceHasEndpoints(service proxy.ServicePortName) bool


@@ -25,17 +25,15 @@ import (
"sync/atomic"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilnet "k8s.io/apimachinery/pkg/util/net"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
"k8s.io/kubernetes/pkg/proxy"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/proxy"
utilproxy "k8s.io/kubernetes/pkg/proxy/util"
"k8s.io/kubernetes/pkg/util/conntrack"
"k8s.io/kubernetes/pkg/util/iptables"
@@ -57,12 +55,12 @@ type ServiceInfo struct {
isAliveAtomic int32 // Only access this with atomic ops
portal portal
protocol api.Protocol
protocol v1.Protocol
proxyPort int
socket ProxySocket
nodePort int
loadBalancerStatus api.LoadBalancerStatus
sessionAffinityType api.ServiceAffinity
loadBalancerStatus v1.LoadBalancerStatus
sessionAffinityType v1.ServiceAffinity
stickyMaxAgeSeconds int
// Deprecated, but required for back-compat (including e2e)
externalIPs []string
@@ -83,7 +81,7 @@ func (info *ServiceInfo) IsAlive() bool {
func logTimeout(err error) bool {
if e, ok := err.(net.Error); ok {
if e.Timeout() {
glog.V(3).Infof("connection to endpoint closed due to inactivity")
klog.V(3).Infof("connection to endpoint closed due to inactivity")
return true
}
}
@@ -91,7 +89,7 @@ func logTimeout(err error) bool {
}
// ProxySocketFunc is a function which constructs a ProxySocket from a protocol, ip, and port
type ProxySocketFunc func(protocol api.Protocol, ip net.IP, port int) (ProxySocket, error)
type ProxySocketFunc func(protocol v1.Protocol, ip net.IP, port int) (ProxySocket, error)
// Proxier is a simple proxy for TCP connections between a localhost:lport
// and services that provide the actual implementations.
@@ -121,7 +119,7 @@ var _ proxy.ProxyProvider = &Proxier{}
type portMapKey struct {
ip string
port int
protocol api.Protocol
protocol v1.Protocol
}
func (k *portMapKey) String() string {
@@ -186,7 +184,7 @@ func NewCustomProxier(loadBalancer LoadBalancer, listenIP net.IP, iptables iptab
proxyPorts := newPortAllocator(pr)
glog.V(2).Infof("Setting proxy IP to %v and initializing iptables", hostIP)
klog.V(2).Infof("Setting proxy IP to %v and initializing iptables", hostIP)
return createProxier(loadBalancer, listenIP, iptables, exec, hostIP, proxyPorts, syncPeriod, minSyncPeriod, udpIdleTimeout, makeProxySocket)
}
@@ -231,13 +229,13 @@ func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) {
args := []string{"-m", "comment", "--comment", "handle ClusterIPs; NOTE: this must be before the NodePort rules"}
if err := ipt.DeleteRule(iptables.TableNAT, iptables.ChainOutput, append(args, "-j", string(iptablesHostPortalChain))...); err != nil {
if !iptables.IsNotFoundError(err) {
glog.Errorf("Error removing userspace rule: %v", err)
klog.Errorf("Error removing userspace rule: %v", err)
encounteredError = true
}
}
if err := ipt.DeleteRule(iptables.TableNAT, iptables.ChainPrerouting, append(args, "-j", string(iptablesContainerPortalChain))...); err != nil {
if !iptables.IsNotFoundError(err) {
glog.Errorf("Error removing userspace rule: %v", err)
klog.Errorf("Error removing userspace rule: %v", err)
encounteredError = true
}
}
@@ -245,20 +243,20 @@ func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) {
args = append(args, "-m", "comment", "--comment", "handle service NodePorts; NOTE: this must be the last rule in the chain")
if err := ipt.DeleteRule(iptables.TableNAT, iptables.ChainOutput, append(args, "-j", string(iptablesHostNodePortChain))...); err != nil {
if !iptables.IsNotFoundError(err) {
glog.Errorf("Error removing userspace rule: %v", err)
klog.Errorf("Error removing userspace rule: %v", err)
encounteredError = true
}
}
if err := ipt.DeleteRule(iptables.TableNAT, iptables.ChainPrerouting, append(args, "-j", string(iptablesContainerNodePortChain))...); err != nil {
if !iptables.IsNotFoundError(err) {
glog.Errorf("Error removing userspace rule: %v", err)
klog.Errorf("Error removing userspace rule: %v", err)
encounteredError = true
}
}
args = []string{"-m", "comment", "--comment", "Ensure that non-local NodePort traffic can flow"}
if err := ipt.DeleteRule(iptables.TableFilter, iptables.ChainInput, append(args, "-j", string(iptablesNonLocalNodePortChain))...); err != nil {
if !iptables.IsNotFoundError(err) {
glog.Errorf("Error removing userspace rule: %v", err)
klog.Errorf("Error removing userspace rule: %v", err)
encounteredError = true
}
}
@@ -273,13 +271,13 @@ func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) {
// flush chain, then if successful delete it; delete will fail if flush fails.
if err := ipt.FlushChain(table, c); err != nil {
if !iptables.IsNotFoundError(err) {
glog.Errorf("Error flushing userspace chain: %v", err)
klog.Errorf("Error flushing userspace chain: %v", err)
encounteredError = true
}
} else {
if err = ipt.DeleteChain(table, c); err != nil {
if !iptables.IsNotFoundError(err) {
glog.Errorf("Error deleting userspace chain: %v", err)
klog.Errorf("Error deleting userspace chain: %v", err)
encounteredError = true
}
}
@@ -292,7 +290,7 @@ func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) {
// Sync is called to immediately synchronize the proxier state to iptables
func (proxier *Proxier) Sync() {
if err := iptablesInit(proxier.iptables); err != nil {
glog.Errorf("Failed to ensure iptables: %v", err)
klog.Errorf("Failed to ensure iptables: %v", err)
}
proxier.ensurePortals()
proxier.cleanupStaleStickySessions()
@@ -304,7 +302,7 @@ func (proxier *Proxier) SyncLoop() {
defer t.Stop()
for {
<-t.C
glog.V(6).Infof("Periodic sync")
klog.V(6).Infof("Periodic sync")
proxier.Sync()
}
}
@@ -317,7 +315,7 @@ func (proxier *Proxier) ensurePortals() {
for name, info := range proxier.serviceMap {
err := proxier.openPortal(name, info)
if err != nil {
glog.Errorf("Failed to ensure portal for %q: %v", name, err)
klog.Errorf("Failed to ensure portal for %q: %v", name, err)
}
}
}
@@ -364,7 +362,7 @@ func (proxier *Proxier) setServiceInfo(service proxy.ServicePortName, info *Serv
// addServiceOnPort starts listening for a new service, returning the ServiceInfo.
// Pass proxyPort=0 to allocate a random port. The timeout only applies to UDP
// connections, for now.
func (proxier *Proxier) addServiceOnPort(service proxy.ServicePortName, protocol api.Protocol, proxyPort int, timeout time.Duration) (*ServiceInfo, error) {
func (proxier *Proxier) addServiceOnPort(service proxy.ServicePortName, protocol v1.Protocol, proxyPort int, timeout time.Duration) (*ServiceInfo, error) {
sock, err := proxier.makeProxySocket(protocol, proxier.listenIP, proxyPort)
if err != nil {
return nil, err
@@ -386,11 +384,11 @@ func (proxier *Proxier) addServiceOnPort(service proxy.ServicePortName, protocol
proxyPort: portNum,
protocol: protocol,
socket: sock,
sessionAffinityType: api.ServiceAffinityNone, // default
sessionAffinityType: v1.ServiceAffinityNone, // default
}
proxier.setServiceInfo(service, si)
glog.V(2).Infof("Proxying for service %q on %s port %d", service, protocol, portNum)
klog.V(2).Infof("Proxying for service %q on %s port %d", service, protocol, portNum)
go func(service proxy.ServicePortName, proxier *Proxier) {
defer runtime.HandleCrash()
atomic.AddInt32(&proxier.numProxyLoops, 1)
@@ -401,13 +399,13 @@ func (proxier *Proxier) addServiceOnPort(service proxy.ServicePortName, protocol
return si, nil
}
func (proxier *Proxier) mergeService(service *api.Service) sets.String {
func (proxier *Proxier) mergeService(service *v1.Service) sets.String {
if service == nil {
return nil
}
svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if !helper.IsServiceIPSet(service) {
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
return nil
}
existingPorts := sets.NewString()
@@ -422,25 +420,25 @@ func (proxier *Proxier) mergeService(service *api.Service) sets.String {
continue
}
if exists {
glog.V(4).Infof("Something changed for service %q: stopping it", serviceName)
klog.V(4).Infof("Something changed for service %q: stopping it", serviceName)
if err := proxier.closePortal(serviceName, info); err != nil {
glog.Errorf("Failed to close portal for %q: %v", serviceName, err)
klog.Errorf("Failed to close portal for %q: %v", serviceName, err)
}
if err := proxier.stopProxy(serviceName, info); err != nil {
glog.Errorf("Failed to stop service %q: %v", serviceName, err)
klog.Errorf("Failed to stop service %q: %v", serviceName, err)
}
}
proxyPort, err := proxier.proxyPorts.AllocateNext()
if err != nil {
glog.Errorf("failed to allocate proxy port for service %q: %v", serviceName, err)
klog.Errorf("failed to allocate proxy port for service %q: %v", serviceName, err)
continue
}
serviceIP := net.ParseIP(service.Spec.ClusterIP)
glog.V(1).Infof("Adding new service %q at %s/%s", serviceName, net.JoinHostPort(serviceIP.String(), strconv.Itoa(int(servicePort.Port))), servicePort.Protocol)
klog.V(1).Infof("Adding new service %q at %s/%s", serviceName, net.JoinHostPort(serviceIP.String(), strconv.Itoa(int(servicePort.Port))), servicePort.Protocol)
info, err = proxier.addServiceOnPort(serviceName, servicePort.Protocol, proxyPort, proxier.udpIdleTimeout)
if err != nil {
glog.Errorf("Failed to start proxy for %q: %v", serviceName, err)
klog.Errorf("Failed to start proxy for %q: %v", serviceName, err)
continue
}
info.portal.ip = serviceIP
@@ -451,14 +449,14 @@ func (proxier *Proxier) mergeService(service *api.Service) sets.String {
info.nodePort = int(servicePort.NodePort)
info.sessionAffinityType = service.Spec.SessionAffinity
// Kube-apiserver side guarantees SessionAffinityConfig won't be nil when session affinity type is ClientIP
if service.Spec.SessionAffinity == api.ServiceAffinityClientIP {
if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP {
info.stickyMaxAgeSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
}
glog.V(4).Infof("info: %#v", info)
klog.V(4).Infof("info: %#v", info)
if err := proxier.openPortal(serviceName, info); err != nil {
glog.Errorf("Failed to open portal for %q: %v", serviceName, err)
klog.Errorf("Failed to open portal for %q: %v", serviceName, err)
}
proxier.loadBalancer.NewService(serviceName, info.sessionAffinityType, info.stickyMaxAgeSeconds)
}
@@ -466,13 +464,13 @@ func (proxier *Proxier) mergeService(service *api.Service) sets.String {
return existingPorts
}
func (proxier *Proxier) unmergeService(service *api.Service, existingPorts sets.String) {
func (proxier *Proxier) unmergeService(service *v1.Service, existingPorts sets.String) {
if service == nil {
return
}
svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if !helper.IsServiceIPSet(service) {
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
return
}
@@ -486,49 +484,49 @@ func (proxier *Proxier) unmergeService(service *api.Service, existingPorts sets.
}
serviceName := proxy.ServicePortName{NamespacedName: svcName, Port: servicePort.Name}
glog.V(1).Infof("Stopping service %q", serviceName)
klog.V(1).Infof("Stopping service %q", serviceName)
info, exists := proxier.serviceMap[serviceName]
if !exists {
glog.Errorf("Service %q is being removed but doesn't exist", serviceName)
klog.Errorf("Service %q is being removed but doesn't exist", serviceName)
continue
}
if proxier.serviceMap[serviceName].protocol == api.ProtocolUDP {
if proxier.serviceMap[serviceName].protocol == v1.ProtocolUDP {
staleUDPServices.Insert(proxier.serviceMap[serviceName].portal.ip.String())
}
if err := proxier.closePortal(serviceName, info); err != nil {
glog.Errorf("Failed to close portal for %q: %v", serviceName, err)
klog.Errorf("Failed to close portal for %q: %v", serviceName, err)
}
if err := proxier.stopProxyInternal(serviceName, info); err != nil {
glog.Errorf("Failed to stop service %q: %v", serviceName, err)
klog.Errorf("Failed to stop service %q: %v", serviceName, err)
}
proxier.loadBalancer.DeleteService(serviceName)
}
for _, svcIP := range staleUDPServices.UnsortedList() {
if err := conntrack.ClearEntriesForIP(proxier.exec, svcIP, v1.ProtocolUDP); err != nil {
glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err)
klog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err)
}
}
}
func (proxier *Proxier) OnServiceAdd(service *api.Service) {
func (proxier *Proxier) OnServiceAdd(service *v1.Service) {
_ = proxier.mergeService(service)
}
func (proxier *Proxier) OnServiceUpdate(oldService, service *api.Service) {
func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) {
existingPorts := proxier.mergeService(service)
proxier.unmergeService(oldService, existingPorts)
}
func (proxier *Proxier) OnServiceDelete(service *api.Service) {
func (proxier *Proxier) OnServiceDelete(service *v1.Service) {
proxier.unmergeService(service, sets.NewString())
}
func (proxier *Proxier) OnServiceSynced() {
}
func sameConfig(info *ServiceInfo, service *api.Service, port *api.ServicePort) bool {
func sameConfig(info *ServiceInfo, service *v1.Service, port *v1.ServicePort) bool {
if info.protocol != port.Protocol || info.portal.port != int(port.Port) || info.nodePort != int(port.NodePort) {
return false
}
@@ -587,7 +585,7 @@ func (proxier *Proxier) openPortal(service proxy.ServicePortName, info *ServiceI
return nil
}
func (proxier *Proxier) openOnePortal(portal portal, protocol api.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) error {
func (proxier *Proxier) openOnePortal(portal portal, protocol v1.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) error {
if local, err := utilproxy.IsLocalIP(portal.ip.String()); err != nil {
return fmt.Errorf("can't determine if IP %s is local, assuming not: %v", portal.ip, err)
} else if local {
@@ -602,31 +600,31 @@ func (proxier *Proxier) openOnePortal(portal portal, protocol api.Protocol, prox
portalAddress := net.JoinHostPort(portal.ip.String(), strconv.Itoa(portal.port))
existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerPortalChain, args...)
if err != nil {
glog.Errorf("Failed to install iptables %s rule for service %q, args:%v", iptablesContainerPortalChain, name, args)
klog.Errorf("Failed to install iptables %s rule for service %q, args:%v", iptablesContainerPortalChain, name, args)
return err
}
if !existed {
glog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s", name, protocol, portalAddress)
klog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s", name, protocol, portalAddress)
}
if portal.isExternal {
args := proxier.iptablesContainerPortalArgs(portal.ip, false, true, portal.port, protocol, proxyIP, proxyPort, name)
existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerPortalChain, args...)
if err != nil {
glog.Errorf("Failed to install iptables %s rule that opens service %q for local traffic, args:%v", iptablesContainerPortalChain, name, args)
klog.Errorf("Failed to install iptables %s rule that opens service %q for local traffic, args:%v", iptablesContainerPortalChain, name, args)
return err
}
if !existed {
glog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s for local traffic", name, protocol, portalAddress)
klog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s for local traffic", name, protocol, portalAddress)
}
args = proxier.iptablesHostPortalArgs(portal.ip, true, portal.port, protocol, proxyIP, proxyPort, name)
existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostPortalChain, args...)
if err != nil {
glog.Errorf("Failed to install iptables %s rule for service %q for dst-local traffic", iptablesHostPortalChain, name)
klog.Errorf("Failed to install iptables %s rule for service %q for dst-local traffic", iptablesHostPortalChain, name)
return err
}
if !existed {
glog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s for dst-local traffic", name, protocol, portalAddress)
klog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s for dst-local traffic", name, protocol, portalAddress)
}
return nil
}
@@ -635,18 +633,18 @@ func (proxier *Proxier) openOnePortal(portal portal, protocol api.Protocol, prox
args = proxier.iptablesHostPortalArgs(portal.ip, false, portal.port, protocol, proxyIP, proxyPort, name)
existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostPortalChain, args...)
if err != nil {
glog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostPortalChain, name)
klog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostPortalChain, name)
return err
}
if !existed {
glog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s", name, protocol, portalAddress)
klog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s", name, protocol, portalAddress)
}
return nil
}
// Marks a port as being owned by a particular service, or returns error if already claimed.
// Idempotent: reclaiming with the same owner is not an error
func (proxier *Proxier) claimNodePort(ip net.IP, port int, protocol api.Protocol, owner proxy.ServicePortName) error {
func (proxier *Proxier) claimNodePort(ip net.IP, port int, protocol v1.Protocol, owner proxy.ServicePortName) error {
proxier.portMapMutex.Lock()
defer proxier.portMapMutex.Unlock()
@@ -667,7 +665,7 @@ func (proxier *Proxier) claimNodePort(ip net.IP, port int, protocol api.Protocol
return fmt.Errorf("can't open node port for %s: %v", key.String(), err)
}
proxier.portMap[key] = &portMapValue{owner: owner, socket: socket}
glog.V(2).Infof("Claimed local port %s", key.String())
klog.V(2).Infof("Claimed local port %s", key.String())
return nil
}
if existing.owner == owner {
@@ -679,7 +677,7 @@ func (proxier *Proxier) claimNodePort(ip net.IP, port int, protocol api.Protocol
// Release a claim on a port. Returns an error if the owner does not match the claim.
// Tolerates release on an unclaimed port, to simplify cleanup.
func (proxier *Proxier) releaseNodePort(ip net.IP, port int, protocol api.Protocol, owner proxy.ServicePortName) error {
func (proxier *Proxier) releaseNodePort(ip net.IP, port int, protocol v1.Protocol, owner proxy.ServicePortName) error {
proxier.portMapMutex.Lock()
defer proxier.portMapMutex.Unlock()
@@ -687,7 +685,7 @@ func (proxier *Proxier) releaseNodePort(ip net.IP, port int, protocol api.Protoc
existing, found := proxier.portMap[key]
if !found {
// We tolerate this, it happens if we are cleaning up a failed allocation
glog.Infof("Ignoring release on unowned port: %v", key)
klog.Infof("Ignoring release on unowned port: %v", key)
return nil
}
if existing.owner != owner {
@@ -698,7 +696,7 @@ func (proxier *Proxier) releaseNodePort(ip net.IP, port int, protocol api.Protoc
return nil
}
func (proxier *Proxier) openNodePort(nodePort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) error {
func (proxier *Proxier) openNodePort(nodePort int, protocol v1.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) error {
// TODO: Do we want to allow containers to access public services? Probably yes.
// TODO: We could refactor this to be the same code as portal, but with IP == nil
@@ -708,35 +706,35 @@ func (proxier *Proxier) openNodePort(nodePort int, protocol api.Protocol, proxyI
}
// Handle traffic from containers.
args := proxier.iptablesContainerNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
args := proxier.iptablesContainerPortalArgs(nil, false, false, nodePort, protocol, proxyIP, proxyPort, name)
existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerNodePortChain, args...)
if err != nil {
glog.Errorf("Failed to install iptables %s rule for service %q", iptablesContainerNodePortChain, name)
klog.Errorf("Failed to install iptables %s rule for service %q", iptablesContainerNodePortChain, name)
return err
}
if !existed {
glog.Infof("Opened iptables from-containers public port for service %q on %s port %d", name, protocol, nodePort)
klog.Infof("Opened iptables from-containers public port for service %q on %s port %d", name, protocol, nodePort)
}
// Handle traffic from the host.
args = proxier.iptablesHostNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostNodePortChain, args...)
if err != nil {
glog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostNodePortChain, name)
klog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostNodePortChain, name)
return err
}
if !existed {
glog.Infof("Opened iptables from-host public port for service %q on %s port %d", name, protocol, nodePort)
klog.Infof("Opened iptables from-host public port for service %q on %s port %d", name, protocol, nodePort)
}
args = proxier.iptablesNonLocalNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableFilter, iptablesNonLocalNodePortChain, args...)
if err != nil {
glog.Errorf("Failed to install iptables %s rule for service %q", iptablesNonLocalNodePortChain, name)
klog.Errorf("Failed to install iptables %s rule for service %q", iptablesNonLocalNodePortChain, name)
return err
}
if !existed {
glog.Infof("Opened iptables from-non-local public port for service %q on %s port %d", name, protocol, nodePort)
klog.Infof("Opened iptables from-non-local public port for service %q on %s port %d", name, protocol, nodePort)
}
return nil
@@ -757,14 +755,14 @@ func (proxier *Proxier) closePortal(service proxy.ServicePortName, info *Service
el = append(el, proxier.closeNodePort(info.nodePort, info.protocol, proxier.listenIP, info.proxyPort, service)...)
}
if len(el) == 0 {
glog.V(3).Infof("Closed iptables portals for service %q", service)
klog.V(3).Infof("Closed iptables portals for service %q", service)
} else {
glog.Errorf("Some errors closing iptables portals for service %q", service)
klog.Errorf("Some errors closing iptables portals for service %q", service)
}
return utilerrors.NewAggregate(el)
}
func (proxier *Proxier) closeOnePortal(portal portal, protocol api.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) []error {
func (proxier *Proxier) closeOnePortal(portal portal, protocol v1.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) []error {
el := []error{}
if local, err := utilproxy.IsLocalIP(portal.ip.String()); err != nil {
@@ -778,20 +776,20 @@ func (proxier *Proxier) closeOnePortal(portal portal, protocol api.Protocol, pro
// Handle traffic from containers.
args := proxier.iptablesContainerPortalArgs(portal.ip, portal.isExternal, false, portal.port, protocol, proxyIP, proxyPort, name)
if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerPortalChain, args...); err != nil {
glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name)
klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name)
el = append(el, err)
}
if portal.isExternal {
args := proxier.iptablesContainerPortalArgs(portal.ip, false, true, portal.port, protocol, proxyIP, proxyPort, name)
if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerPortalChain, args...); err != nil {
glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name)
klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name)
el = append(el, err)
}
args = proxier.iptablesHostPortalArgs(portal.ip, true, portal.port, protocol, proxyIP, proxyPort, name)
if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostPortalChain, args...); err != nil {
glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name)
klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name)
el = append(el, err)
}
return el
@@ -800,34 +798,34 @@ func (proxier *Proxier) closeOnePortal(portal portal, protocol api.Protocol, pro
// Handle traffic from the host (portalIP is not external).
args = proxier.iptablesHostPortalArgs(portal.ip, false, portal.port, protocol, proxyIP, proxyPort, name)
if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostPortalChain, args...); err != nil {
glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name)
klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name)
el = append(el, err)
}
return el
}
func (proxier *Proxier) closeNodePort(nodePort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) []error {
func (proxier *Proxier) closeNodePort(nodePort int, protocol v1.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) []error {
el := []error{}
// Handle traffic from containers.
args := proxier.iptablesContainerNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
args := proxier.iptablesContainerPortalArgs(nil, false, false, nodePort, protocol, proxyIP, proxyPort, name)
if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerNodePortChain, args...); err != nil {
glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerNodePortChain, name)
klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerNodePortChain, name)
el = append(el, err)
}
// Handle traffic from the host.
args = proxier.iptablesHostNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostNodePortChain, args...); err != nil {
glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostNodePortChain, name)
klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostNodePortChain, name)
el = append(el, err)
}
// Handle traffic not local to the host
args = proxier.iptablesNonLocalNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
if err := proxier.iptables.DeleteRule(iptables.TableFilter, iptablesNonLocalNodePortChain, args...); err != nil {
glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesNonLocalNodePortChain, name)
klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesNonLocalNodePortChain, name)
el = append(el, err)
}
@@ -936,7 +934,7 @@ func iptablesFlush(ipt iptables.Interface) error {
el = append(el, err)
}
if len(el) != 0 {
glog.Errorf("Some errors flushing old iptables portals: %v", el)
klog.Errorf("Some errors flushing old iptables portals: %v", el)
}
return utilerrors.NewAggregate(el)
}
@@ -949,7 +947,7 @@ var zeroIPv6 = net.ParseIP("::")
var localhostIPv6 = net.ParseIP("::1")
// Build a slice of iptables args that are common to from-container and from-host portal rules.
func iptablesCommonPortalArgs(destIP net.IP, addPhysicalInterfaceMatch bool, addDstLocalMatch bool, destPort int, protocol api.Protocol, service proxy.ServicePortName) []string {
func iptablesCommonPortalArgs(destIP net.IP, addPhysicalInterfaceMatch bool, addDstLocalMatch bool, destPort int, protocol v1.Protocol, service proxy.ServicePortName) []string {
// This list needs to include all fields as they are eventually spit out
// by iptables-save. This is because some systems do not support the
// 'iptables -C' arg, and so fall back on parsing iptables-save output.
@@ -982,7 +980,7 @@ func iptablesCommonPortalArgs(destIP net.IP, addPhysicalInterfaceMatch bool, add
}
// Build a slice of iptables args for a from-container portal rule.
func (proxier *Proxier) iptablesContainerPortalArgs(destIP net.IP, addPhysicalInterfaceMatch bool, addDstLocalMatch bool, destPort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
func (proxier *Proxier) iptablesContainerPortalArgs(destIP net.IP, addPhysicalInterfaceMatch bool, addDstLocalMatch bool, destPort int, protocol v1.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
args := iptablesCommonPortalArgs(destIP, addPhysicalInterfaceMatch, addDstLocalMatch, destPort, protocol, service)
// This is tricky.
@@ -1029,7 +1027,7 @@ func (proxier *Proxier) iptablesContainerPortalArgs(destIP net.IP, addPhysicalIn
}
// Build a slice of iptables args for a from-host portal rule.
func (proxier *Proxier) iptablesHostPortalArgs(destIP net.IP, addDstLocalMatch bool, destPort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
func (proxier *Proxier) iptablesHostPortalArgs(destIP net.IP, addDstLocalMatch bool, destPort int, protocol v1.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
args := iptablesCommonPortalArgs(destIP, false, addDstLocalMatch, destPort, protocol, service)
// This is tricky.
@@ -1061,27 +1059,10 @@ func (proxier *Proxier) iptablesHostPortalArgs(destIP net.IP, addDstLocalMatch b
return args
}
// Build a slice of iptables args for a from-container public-port rule.
// See iptablesContainerPortalArgs
// TODO: Should we just reuse iptablesContainerPortalArgs?
func (proxier *Proxier) iptablesContainerNodePortArgs(nodePort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
args := iptablesCommonPortalArgs(nil, false, false, nodePort, protocol, service)
if proxyIP.Equal(zeroIPv4) || proxyIP.Equal(zeroIPv6) {
// TODO: Can we REDIRECT with IPv6?
args = append(args, "-j", "REDIRECT", "--to-ports", fmt.Sprintf("%d", proxyPort))
} else {
// TODO: Can we DNAT with IPv6?
args = append(args, "-j", "DNAT", "--to-destination", net.JoinHostPort(proxyIP.String(), strconv.Itoa(proxyPort)))
}
return args
}
// Build a slice of iptables args for a from-host public-port rule.
// See iptablesHostPortalArgs
// TODO: Should we just reuse iptablesHostPortalArgs?
func (proxier *Proxier) iptablesHostNodePortArgs(nodePort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
func (proxier *Proxier) iptablesHostNodePortArgs(nodePort int, protocol v1.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
args := iptablesCommonPortalArgs(nil, false, false, nodePort, protocol, service)
if proxyIP.Equal(zeroIPv4) || proxyIP.Equal(zeroIPv6) {
@@ -1093,7 +1074,7 @@ func (proxier *Proxier) iptablesHostNodePortArgs(nodePort int, protocol api.Prot
}
// Build a slice of iptables args for a from-non-local public-port rule.
func (proxier *Proxier) iptablesNonLocalNodePortArgs(nodePort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
func (proxier *Proxier) iptablesNonLocalNodePortArgs(nodePort int, protocol v1.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
args := iptablesCommonPortalArgs(nil, false, false, proxyPort, protocol, service)
args = append(args, "-m", "state", "--state", "NEW", "-j", "ACCEPT")
return args


@@ -29,10 +29,10 @@ import (
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
ipttest "k8s.io/kubernetes/pkg/util/iptables/testing"
"k8s.io/utils/exec"
@@ -228,11 +228,11 @@ func waitForNumProxyClients(t *testing.T, s *ServiceInfo, want int, timeout time
func TestTCPProxy(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
@@ -255,11 +255,11 @@ func TestTCPProxy(t *testing.T) {
func TestUDPProxy(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
@@ -282,11 +282,11 @@ func TestUDPProxy(t *testing.T) {
func TestUDPProxyTimeout(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
@@ -314,18 +314,18 @@ func TestMultiPortProxy(t *testing.T) {
lb := NewLoadBalancerRR()
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-p"}, Port: "p"}
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-q"}, Port: "q"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Protocol: "TCP", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Protocol: "TCP", Port: tcpServerPort}},
}},
})
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceQ.Name, Namespace: serviceQ.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "q", Protocol: "UDP", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "q", Protocol: "UDP", Port: udpServerPort}},
}},
})
@@ -366,9 +366,9 @@ func TestMultiPortOnServiceAdd(t *testing.T) {
}
waitForNumProxyLoops(t, p, 0)
p.OnServiceAdd(&api.Service{
p.OnServiceAdd(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: 80,
Protocol: "TCP",
@@ -413,11 +413,11 @@ func stopProxyByName(proxier *Proxier, service proxy.ServicePortName) error {
func TestTCPProxyStop(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
@@ -457,11 +457,11 @@ func TestTCPProxyStop(t *testing.T) {
func TestUDPProxyStop(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
@@ -495,11 +495,11 @@ func TestUDPProxyStop(t *testing.T) {
func TestTCPProxyUpdateDelete(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
@@ -522,9 +522,9 @@ func TestTCPProxyUpdateDelete(t *testing.T) {
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
p.OnServiceDelete(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
@@ -539,11 +539,11 @@ func TestTCPProxyUpdateDelete(t *testing.T) {
func TestUDPProxyUpdateDelete(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
@@ -566,9 +566,9 @@ func TestUDPProxyUpdateDelete(t *testing.T) {
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
p.OnServiceDelete(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "UDP",
@@ -583,11 +583,11 @@ func TestUDPProxyUpdateDelete(t *testing.T) {
func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
endpoint := &api.Endpoints{
endpoint := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
}
lb.OnEndpointsAdd(endpoint)
@@ -611,9 +611,9 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
p.OnServiceDelete(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
@@ -626,9 +626,9 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
// need to add the endpoint here because it got cleaned up during service delete
lb.OnEndpointsAdd(endpoint)
p.OnServiceAdd(&api.Service{
p.OnServiceAdd(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
@@ -645,11 +645,11 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
endpoint := &api.Endpoints{
endpoint := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
}
lb.OnEndpointsAdd(endpoint)
@@ -673,9 +673,9 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
p.OnServiceDelete(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "UDP",
@@ -688,9 +688,9 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
// need to add the endpoint here because it got cleaned up during service delete
lb.OnEndpointsAdd(endpoint)
p.OnServiceAdd(&api.Service{
p.OnServiceAdd(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "UDP",
@@ -707,11 +707,11 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
func TestTCPProxyUpdatePort(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
@@ -730,9 +730,9 @@ func TestTCPProxyUpdatePort(t *testing.T) {
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
p.OnServiceAdd(&api.Service{
p.OnServiceAdd(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: 99,
Protocol: "TCP",
@@ -755,11 +755,11 @@ func TestTCPProxyUpdatePort(t *testing.T) {
func TestUDPProxyUpdatePort(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
@@ -777,9 +777,9 @@ func TestUDPProxyUpdatePort(t *testing.T) {
}
waitForNumProxyLoops(t, p, 1)
p.OnServiceAdd(&api.Service{
p.OnServiceAdd(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: 99,
Protocol: "UDP",
@@ -800,11 +800,11 @@ func TestUDPProxyUpdatePort(t *testing.T) {
func TestProxyUpdatePublicIPs(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
@@ -823,10 +823,10 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
p.OnServiceAdd(&api.Service{
p.OnServiceAdd(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.portal.port),
Protocol: "TCP",
@@ -852,11 +852,11 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
func TestProxyUpdatePortal(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
endpoint := &api.Endpoints{
endpoint := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
}
lb.OnEndpointsAdd(endpoint)
@@ -876,18 +876,18 @@ func TestProxyUpdatePortal(t *testing.T) {
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
svcv0 := &api.Service{
svcv0 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
}}},
}
svcv1 := &api.Service{
svcv1 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
@@ -899,9 +899,9 @@ func TestProxyUpdatePortal(t *testing.T) {
t.Fatalf("service with empty ClusterIP should not be included in the proxy")
}
svcv2 := &api.Service{
svcv2 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "None", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "None", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
@@ -913,9 +913,9 @@ func TestProxyUpdatePortal(t *testing.T) {
t.Fatalf("service with 'None' as ClusterIP should not be included in the proxy")
}
svcv3 := &api.Service{
svcv3 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",


@@ -25,9 +25,9 @@ import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/runtime"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/proxy"
)
@@ -45,7 +45,7 @@ type ProxySocket interface {
ListenPort() int
}
func newProxySocket(protocol api.Protocol, ip net.IP, port int) (ProxySocket, error) {
func newProxySocket(protocol v1.Protocol, ip net.IP, port int) (ProxySocket, error) {
host := ""
if ip != nil {
host = ip.String()
@@ -68,6 +68,8 @@ func newProxySocket(protocol api.Protocol, ip net.IP, port int) (ProxySocket, er
return nil, err
}
return &udpProxySocket{UDPConn: conn, port: port}, nil
case "SCTP":
return nil, fmt.Errorf("SCTP is not supported for user space proxy")
}
return nil, fmt.Errorf("unknown protocol %q", protocol)
}
@@ -93,10 +95,10 @@ func TryConnectEndpoints(service proxy.ServicePortName, srcAddr net.Addr, protoc
for _, dialTimeout := range EndpointDialTimeouts {
endpoint, err := loadBalancer.NextEndpoint(service, srcAddr, sessionAffinityReset)
if err != nil {
glog.Errorf("Couldn't find an endpoint for %s: %v", service, err)
klog.Errorf("Couldn't find an endpoint for %s: %v", service, err)
return nil, err
}
glog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint)
klog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint)
// TODO: This could spin up a new goroutine to make the outbound connection,
// and keep accepting inbound traffic.
outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout)
@@ -104,7 +106,7 @@ func TryConnectEndpoints(service proxy.ServicePortName, srcAddr net.Addr, protoc
if isTooManyFDsError(err) {
panic("Dial failed: " + err.Error())
}
glog.Errorf("Dial failed: %v", err)
klog.Errorf("Dial failed: %v", err)
sessionAffinityReset = true
continue
}
@@ -133,13 +135,13 @@ func (tcp *tcpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *Serv
// Then the service port was just closed so the accept failure is to be expected.
return
}
glog.Errorf("Accept failed: %v", err)
klog.Errorf("Accept failed: %v", err)
continue
}
glog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
klog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
outConn, err := TryConnectEndpoints(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", loadBalancer)
if err != nil {
glog.Errorf("Failed to connect to balancer: %v", err)
klog.Errorf("Failed to connect to balancer: %v", err)
inConn.Close()
continue
}
@@ -152,7 +154,7 @@ func (tcp *tcpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *Serv
func ProxyTCP(in, out *net.TCPConn) {
var wg sync.WaitGroup
wg.Add(2)
glog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
klog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
go copyBytes("from backend", in, out, &wg)
go copyBytes("to backend", out, in, &wg)
@@ -161,14 +163,14 @@ func ProxyTCP(in, out *net.TCPConn) {
func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) {
defer wg.Done()
glog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
klog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
n, err := io.Copy(dest, src)
if err != nil {
if !isClosedError(err) {
glog.Errorf("I/O error: %v", err)
klog.Errorf("I/O error: %v", err)
}
}
glog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
klog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
dest.Close()
src.Close()
}
@@ -213,11 +215,11 @@ func (udp *udpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *Serv
if err != nil {
if e, ok := err.(net.Error); ok {
if e.Temporary() {
glog.V(1).Infof("ReadFrom had a temporary failure: %v", err)
klog.V(1).Infof("ReadFrom had a temporary failure: %v", err)
continue
}
}
glog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err)
klog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err)
break
}
// If this is a client we know already, reuse the connection and goroutine.
@ -230,14 +232,14 @@ func (udp *udpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *Serv
_, err = svrConn.Write(buffer[0:n])
if err != nil {
if !logTimeout(err) {
glog.Errorf("Write failed: %v", err)
klog.Errorf("Write failed: %v", err)
// TODO: Maybe tear down the goroutine for this client/server pair?
}
continue
}
err = svrConn.SetDeadline(time.Now().Add(myInfo.Timeout))
if err != nil {
glog.Errorf("SetDeadline failed: %v", err)
klog.Errorf("SetDeadline failed: %v", err)
continue
}
}
@ -251,14 +253,14 @@ func (udp *udpProxySocket) getBackendConn(activeClients *ClientCache, cliAddr ne
if !found {
// TODO: This could spin up a new goroutine to make the outbound connection,
// and keep accepting inbound traffic.
glog.V(3).Infof("New UDP connection from %s", cliAddr)
klog.V(3).Infof("New UDP connection from %s", cliAddr)
var err error
svrConn, err = TryConnectEndpoints(service, cliAddr, "udp", loadBalancer)
if err != nil {
return nil, err
}
if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil {
glog.Errorf("SetDeadline failed: %v", err)
klog.Errorf("SetDeadline failed: %v", err)
return nil, err
}
activeClients.Clients[cliAddr.String()] = svrConn
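
getBackendConn keys outbound UDP connections by the client's source address, so each distinct client reuses one backend connection and one relay goroutine. A sketch of that cache shape; only the Clients map is visible in this hunk, so the mutex and field names are assumptions:

// Illustrative client-connection cache keyed by client address
// (sketch; assumes imports "net", "sync").
type exampleClientCache struct {
	mu      sync.Mutex
	clients map[string]net.Conn // client addr -> backend conn
}

func (c *exampleClientCache) get(addr net.Addr, dial func() (net.Conn, error)) (net.Conn, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if conn, ok := c.clients[addr.String()]; ok {
		return conn, nil // known client: reuse its backend connection
	}
	conn, err := dial()
	if err != nil {
		return nil, err
	}
	c.clients[addr.String()] = conn
	return conn, nil
}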
@ -279,19 +281,19 @@ func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activ
n, err := svrConn.Read(buffer[0:])
if err != nil {
if !logTimeout(err) {
glog.Errorf("Read failed: %v", err)
klog.Errorf("Read failed: %v", err)
}
break
}
err = svrConn.SetDeadline(time.Now().Add(timeout))
if err != nil {
glog.Errorf("SetDeadline failed: %v", err)
klog.Errorf("SetDeadline failed: %v", err)
break
}
n, err = udp.WriteTo(buffer[0:n], cliAddr)
if err != nil {
if !logTimeout(err) {
glog.Errorf("WriteTo failed: %v", err)
klog.Errorf("WriteTo failed: %v", err)
}
break
}
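
The per-client relay loop above reads from the backend, refreshes the idle deadline after each successful read, and answers the client through the listening socket's WriteTo so replies come from the service port the client originally hit; once the deadline fires, Read fails and the goroutine exits. A compact sketch under those assumptions:

// Backend->client UDP relay with idle-timeout eviction
// (sketch; assumes imports "net", "time").
func relayUDP(listener *net.UDPConn, cliAddr net.Addr, backend net.Conn, timeout time.Duration) {
	defer backend.Close()
	buf := make([]byte, 4096)
	for {
		n, err := backend.Read(buf)
		if err != nil {
			return // deadline hit or hard error: drop this client's relay
		}
		// Each successful read extends the idle window.
		if err := backend.SetDeadline(time.Now().Add(timeout)); err != nil {
			return
		}
		if _, err := listener.WriteTo(buf[:n], cliAddr); err != nil {
			return
		}
	}
}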

View File

@ -25,9 +25,9 @@ import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/util/slice"
)
@ -46,7 +46,7 @@ type affinityState struct {
}
type affinityPolicy struct {
affinityType api.ServiceAffinity
affinityType v1.ServiceAffinity
affinityMap map[string]*affinityState // map client IP -> affinity info
ttlSeconds int
}
@ -66,7 +66,7 @@ type balancerState struct {
affinity affinityPolicy
}
func newAffinityPolicy(affinityType api.ServiceAffinity, ttlSeconds int) *affinityPolicy {
func newAffinityPolicy(affinityType v1.ServiceAffinity, ttlSeconds int) *affinityPolicy {
return &affinityPolicy{
affinityType: affinityType,
affinityMap: make(map[string]*affinityState),
@ -81,8 +81,8 @@ func NewLoadBalancerRR() *LoadBalancerRR {
}
}
func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlSeconds int) error {
glog.V(4).Infof("LoadBalancerRR NewService %q", svcPort)
func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) error {
klog.V(4).Infof("LoadBalancerRR NewService %q", svcPort)
lb.lock.Lock()
defer lb.lock.Unlock()
lb.newServiceInternal(svcPort, affinityType, ttlSeconds)
@ -90,14 +90,14 @@ func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType
}
// This assumes that lb.lock is already held.
func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlSeconds int) *balancerState {
func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) *balancerState {
if ttlSeconds == 0 {
ttlSeconds = int(api.DefaultClientIPServiceAffinitySeconds) //default to 3 hours if not specified. Should 0 be unlimited instead????
ttlSeconds = int(v1.DefaultClientIPServiceAffinitySeconds) //default to 3 hours if not specified. Should 0 be unlimited instead????
}
if _, exists := lb.services[svcPort]; !exists {
lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlSeconds)}
glog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort)
klog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort)
} else if affinityType != "" {
lb.services[svcPort].affinity.affinityType = affinityType
}
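
With the switch to core/v1 types, the zero-TTL fallback now reads v1.DefaultClientIPServiceAffinitySeconds (10800, i.e. three hours). A usage sketch mirroring the tests further down; the service name is hypothetical:

// Registering a service with ClientIP session affinity (sketch).
func exampleNewService() {
	lb := NewLoadBalancerRR()
	svcPort := proxy.ServicePortName{
		NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"},
		Port:           "p",
	}
	// ttlSeconds == 0 falls back to v1.DefaultClientIPServiceAffinitySeconds.
	lb.NewService(svcPort, v1.ServiceAffinityClientIP, 0)
}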
@ -105,7 +105,7 @@ func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affi
}
func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
glog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort)
klog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort)
lb.lock.Lock()
defer lb.lock.Unlock()
delete(lb.services, svcPort)
@ -114,7 +114,7 @@ func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
// return true if this service is using some form of session affinity.
func isSessionAffinity(affinity *affinityPolicy) bool {
// Should never be empty string, but checking for it to be safe.
if affinity.affinityType == "" || affinity.affinityType == api.ServiceAffinityNone {
if affinity.affinityType == "" || affinity.affinityType == v1.ServiceAffinityNone {
return false
}
return true
@ -145,7 +145,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
if len(state.endpoints) == 0 {
return "", ErrMissingEndpoints
}
glog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints)
klog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints)
sessionAffinityEnabled := isSessionAffinity(&state.affinity)
@ -163,7 +163,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
// Affinity wins.
endpoint := sessionAffinity.endpoint
sessionAffinity.lastUsed = time.Now()
glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint)
klog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint)
return endpoint, nil
}
}
@ -182,7 +182,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
affinity.lastUsed = time.Now()
affinity.endpoint = endpoint
affinity.clientIP = ipaddr
glog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr])
klog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr])
}
return endpoint, nil
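
When no usable affinity entry exists, NextEndpoint falls through to plain round-robin before recording (or refreshing) the client's affinity as above. The index advance itself sits between these hunks; it is the usual modular step, roughly:

// Round-robin pick over the service's endpoints (sketch of the elided
// lines; the names match the surrounding code, the exact text is assumed).
endpoint := state.endpoints[state.index]
state.index = (state.index + 1) % len(state.endpoints)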
@ -214,7 +214,7 @@ func flattenValidEndpoints(endpoints []hostPortPair) []string {
func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) {
for _, affinity := range state.affinity.affinityMap {
if affinity.endpoint == endpoint {
glog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort)
klog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort)
delete(state.affinity.affinityMap, affinity.clientIP)
}
}
@ -237,7 +237,7 @@ func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEn
}
for mKey, mVal := range allEndpoints {
if mVal == 1 {
glog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort)
klog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort)
removeSessionAffinityByEndpoint(state, svcPort, mKey)
}
}
@ -245,7 +245,7 @@ func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEn
// buildPortsToEndpointsMap builds a map of portname -> all ip:ports for that
// portname. Explode Endpoints.Subsets[*] into this structure.
func buildPortsToEndpointsMap(endpoints *api.Endpoints) map[string][]hostPortPair {
func buildPortsToEndpointsMap(endpoints *v1.Endpoints) map[string][]hostPortPair {
portsToEndpoints := map[string][]hostPortPair{}
for i := range endpoints.Subsets {
ss := &endpoints.Subsets[i]
@ -261,7 +261,7 @@ func buildPortsToEndpointsMap(endpoints *api.Endpoints) map[string][]hostPortPai
return portsToEndpoints
}
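
A worked example of that explosion: one subset with two addresses and two named ports yields one map entry per port name, each pairing every address with that port's number (hostPortPair field layout assumed):

// Sketch: for the input below, buildPortsToEndpointsMap returns
//   "p" -> [{10.0.0.1 80}, {10.0.0.2 80}]
//   "q" -> [{10.0.0.1 443}, {10.0.0.2 443}]
func exampleBuildPortsToEndpointsMap() map[string][]hostPortPair {
	eps := &v1.Endpoints{
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "10.0.0.1"}, {IP: "10.0.0.2"}},
			Ports:     []v1.EndpointPort{{Name: "p", Port: 80}, {Name: "q", Port: 443}},
		}},
	}
	return buildPortsToEndpointsMap(eps)
}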
func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *api.Endpoints) {
func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *v1.Endpoints) {
portsToEndpoints := buildPortsToEndpointsMap(endpoints)
lb.lock.Lock()
@ -273,13 +273,13 @@ func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *api.Endpoints) {
state, exists := lb.services[svcPort]
if !exists || state == nil || len(newEndpoints) > 0 {
glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
klog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
lb.updateAffinityMap(svcPort, newEndpoints)
// OnEndpointsAdd can be called without NewService being called externally.
// To be safe we will call it here. A new service will only be created
// if one does not already exist. The affinity will be updated
// later, once NewService is called.
state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
state = lb.newServiceInternal(svcPort, v1.ServiceAffinity(""), 0)
state.endpoints = slice.ShuffleStrings(newEndpoints)
// Reset the round-robin index.
@ -288,7 +288,7 @@ func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *api.Endpoints) {
}
}
func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) {
portsToEndpoints := buildPortsToEndpointsMap(endpoints)
oldPortsToEndpoints := buildPortsToEndpointsMap(oldEndpoints)
registeredEndpoints := make(map[proxy.ServicePortName]bool)
@ -307,13 +307,13 @@ func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoin
}
if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints) {
glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
klog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
lb.updateAffinityMap(svcPort, newEndpoints)
// OnEndpointsUpdate can be called without NewService being called externally.
// To be safe we will call it here. A new service will only be created
// if one does not already exist. The affinity will be updated
// later, once NewService is called.
state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
state = lb.newServiceInternal(svcPort, v1.ServiceAffinity(""), 0)
state.endpoints = slice.ShuffleStrings(newEndpoints)
// Reset the round-robin index.
@ -335,7 +335,7 @@ func (lb *LoadBalancerRR) resetService(svcPort proxy.ServicePortName) {
// If the service is still around, reset but don't delete.
if state, ok := lb.services[svcPort]; ok {
if len(state.endpoints) > 0 {
glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
klog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
state.endpoints = []string{}
}
state.index = 0
@ -343,7 +343,7 @@ func (lb *LoadBalancerRR) resetService(svcPort proxy.ServicePortName) {
}
}
func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *api.Endpoints) {
func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *v1.Endpoints) {
portsToEndpoints := buildPortsToEndpointsMap(endpoints)
lb.lock.Lock()
@ -379,7 +379,7 @@ func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortNa
}
for ip, affinity := range state.affinity.affinityMap {
if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
klog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
delete(state.affinity.affinityMap, ip)
}
}

View File

@ -20,9 +20,9 @@ import (
"net"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
)
@ -104,11 +104,11 @@ func TestLoadBalanceWorksWithSingleEndpoint(t *testing.T) {
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints := &api.Endpoints{
endpoints := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
Ports: []api.EndpointPort{{Name: "p", Port: 40}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "endpoint1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 40}},
}},
}
loadBalancer.OnEndpointsAdd(endpoints)
@ -141,11 +141,11 @@ func TestLoadBalanceWorksWithMultipleEndpoints(t *testing.T) {
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints := &api.Endpoints{
endpoints := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}},
}},
}
loadBalancer.OnEndpointsAdd(endpoints)
@ -168,16 +168,16 @@ func TestLoadBalanceWorksWithMultipleEndpointsMultiplePorts(t *testing.T) {
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints := &api.Endpoints{
endpoints := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}},
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}},
Addresses: []v1.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
Ports: []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}},
Addresses: []v1.EndpointAddress{{IP: "endpoint3"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}},
},
},
}
@ -210,20 +210,20 @@ func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpointsv1 := &api.Endpoints{
endpointsv1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}},
Addresses: []v1.EndpointAddress{{IP: "endpoint1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint2"}},
Ports: []api.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}},
Addresses: []v1.EndpointAddress{{IP: "endpoint2"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
Ports: []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}},
Addresses: []v1.EndpointAddress{{IP: "endpoint3"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}},
},
},
}
@ -249,16 +249,16 @@ func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
// Then update the configuration with one fewer endpoints, make sure
// we start at the beginning again
endpointsv2 := &api.Endpoints{
endpointsv2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint4"}},
Ports: []api.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}},
Addresses: []v1.EndpointAddress{{IP: "endpoint4"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint5"}},
Ports: []api.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}},
Addresses: []v1.EndpointAddress{{IP: "endpoint5"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}},
},
},
}
@ -283,7 +283,7 @@ func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
// Clear endpoints
endpointsv3 := &api.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil}
endpointsv3 := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil}
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
endpoint, err = loadBalancer.NextEndpoint(serviceP, nil, false)
@ -300,21 +300,21 @@ func TestLoadBalanceWorksWithServiceRemoval(t *testing.T) {
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints1 := &api.Endpoints{
endpoints1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: fooServiceP.Name, Namespace: fooServiceP.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}},
Ports: []api.EndpointPort{{Name: "p", Port: 123}},
Addresses: []v1.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 123}},
},
},
}
endpoints2 := &api.Endpoints{
endpoints2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: barServiceP.Name, Namespace: barServiceP.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}},
Ports: []api.EndpointPort{{Name: "p", Port: 456}},
Addresses: []v1.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 456}},
},
},
}
@ -357,13 +357,13 @@ func TestStickyLoadBalanceWorksWithNewServiceCalledFirst(t *testing.T) {
}
// Call NewService() before OnEndpointsUpdate()
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints := &api.Endpoints{
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
endpoints := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
Subsets: []v1.EndpointSubset{
{Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
{Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
{Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, Ports: []v1.EndpointPort{{Port: 3}}},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
@ -413,15 +413,15 @@ func TestStickyLoadBalanceWorksWithNewServiceCalledSecond(t *testing.T) {
}
// Call OnEndpointsUpdate() before NewService()
endpoints := &api.Endpoints{
endpoints := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
Subsets: []v1.EndpointSubset{
{Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
{Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
@ -473,13 +473,13 @@ func TestStickyLoadBalanaceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
t.Errorf("Didn't fail with non-existent service")
}
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpointsv1 := &api.Endpoints{
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
endpointsv1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
},
},
}
@ -494,12 +494,12 @@ func TestStickyLoadBalanaceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
client3Endpoint := shuffledEndpoints[2]
endpointsv2 := &api.Endpoints{
endpointsv2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}},
},
},
}
@ -516,12 +516,12 @@ func TestStickyLoadBalanaceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
endpointsv3 := &api.Endpoints{
endpointsv3 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}},
},
},
}
@ -546,13 +546,13 @@ func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
t.Errorf("Didn't fail with non-existent service")
}
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpointsv1 := &api.Endpoints{
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
endpointsv1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
},
},
}
@ -567,12 +567,12 @@ func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
// Then update the configuration with one fewer endpoints, make sure
// we start at the beginning again
endpointsv2 := &api.Endpoints{
endpointsv2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 4}, {Port: 5}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 4}, {Port: 5}},
},
},
}
@ -586,7 +586,7 @@ func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
// Clear endpoints
endpointsv3 := &api.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil}
endpointsv3 := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil}
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
endpoint, err = loadBalancer.NextEndpoint(service, nil, false)
@ -605,24 +605,24 @@ func TestStickyLoadBalanceWorksWithServiceRemoval(t *testing.T) {
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
loadBalancer.NewService(fooService, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints1 := &api.Endpoints{
loadBalancer.NewService(fooService, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
endpoints1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: fooService.Name, Namespace: fooService.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
},
},
}
barService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: ""}
loadBalancer.NewService(barService, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints2 := &api.Endpoints{
loadBalancer.NewService(barService, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
endpoints2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: barService.Name, Namespace: barService.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 4}, {Port: 5}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 4}, {Port: 5}},
},
},
}
@ -674,13 +674,13 @@ func TestStickyLoadBalanceWorksWithEndpointFails(t *testing.T) {
}
// Call NewService() before OnEndpointsUpdate()
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints := &api.Endpoints{
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
endpoints := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
Subsets: []v1.EndpointSubset{
{Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
{Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
{Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, Ports: []v1.EndpointPort{{Port: 3}}},
},
}
loadBalancer.OnEndpointsAdd(endpoints)