mirror of https://github.com/ceph/ceph-csi.git

Fresh dep ensure

vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/BUILD (generated, vendored, 20 lines changed)
@@ -17,17 +17,17 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/pkg/proxy/winuserspace",
     deps = [
         "//pkg/apis/core:go_default_library",
-        "//pkg/apis/core/helper:go_default_library",
+        "//pkg/apis/core/v1/helper:go_default_library",
         "//pkg/proxy:go_default_library",
         "//pkg/util/ipconfig:go_default_library",
         "//pkg/util/netsh:go_default_library",
         "//pkg/util/slice:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
         "//vendor/github.com/miekg/dns:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
         "//vendor/k8s.io/utils/exec:go_default_library",
     ],
 )
@@ -40,12 +40,12 @@ go_test(
     ],
     embed = [":go_default_library"],
     deps = [
-        "//pkg/apis/core:go_default_library",
         "//pkg/proxy:go_default_library",
         "//pkg/util/netsh/testing:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
     ],
 )
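The BUILD changes simply track the Go import changes in the sources below: the glog dependency becomes klog, and the core types move from the internal //pkg/apis/core tree to the staged k8s.io/api and k8s.io/apimachinery packages. A minimal sketch of what the migration looks like from a consumer's point of view (a standalone program, not part of this diff):

```go
// Sketch of the two mechanical substitutions this vendor bump applies:
// glog -> klog (same logging API surface), and the internal
// k8s.io/kubernetes/pkg/apis/core types -> k8s.io/api/core/v1.
package main

import (
	v1 "k8s.io/api/core/v1" // replaces: api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/klog"           // replaces: "github.com/golang/glog"
)

func main() {
	// The constants keep their names; only the package qualifier changes.
	affinity := v1.ServiceAffinityClientIP
	// klog mirrors glog's V-level API, so call sites change only the prefix.
	klog.V(2).Infof("session affinity: %v", affinity)
	klog.Flush()
}
```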
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/loadbalancer.go (generated, vendored, 4 lines changed)
@@ -17,7 +17,7 @@ limitations under the License.
 package winuserspace
 
 import (
-	api "k8s.io/kubernetes/pkg/apis/core"
+	"k8s.io/api/core/v1"
 	"k8s.io/kubernetes/pkg/proxy"
 	"net"
 )
@@ -27,7 +27,7 @@ type LoadBalancer interface {
 	// NextEndpoint returns the endpoint to handle a request for the given
 	// service-port and source address.
 	NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error)
-	NewService(service proxy.ServicePortName, sessionAffinityType api.ServiceAffinity, stickyMaxAgeMinutes int) error
+	NewService(service proxy.ServicePortName, sessionAffinityType v1.ServiceAffinity, stickyMaxAgeMinutes int) error
 	DeleteService(service proxy.ServicePortName)
 	CleanupStaleStickySessions(service proxy.ServicePortName)
 }
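The only change in loadbalancer.go is the NewService parameter type, so every implementation of the interface has to follow suit. An illustrative no-op stub against the updated signatures, assuming it lives inside the winuserspace package (hypothetical, for clarity only; the real implementation is LoadBalancerRR in roundrobin.go below):

```go
package winuserspace

import (
	"net"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/proxy"
)

// noopBalancer is an illustrative stub satisfying the updated interface.
type noopBalancer struct{}

func (noopBalancer) NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error) {
	return "", ErrMissingEndpoints // package-level error, defined alongside LoadBalancerRR
}

func (noopBalancer) NewService(service proxy.ServicePortName, sessionAffinityType v1.ServiceAffinity, stickyMaxAgeMinutes int) error {
	return nil // accept and ignore every service
}

func (noopBalancer) DeleteService(service proxy.ServicePortName)               {}
func (noopBalancer) CleanupStaleStickySessions(service proxy.ServicePortName) {}

var _ LoadBalancer = noopBalancer{} // compile-time interface check
```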
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier.go (generated, vendored, 62 lines changed)
@@ -25,13 +25,13 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
+	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	utilnet "k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/apimachinery/pkg/util/runtime"
-	api "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/pkg/apis/core/helper"
+	"k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/proxy"
 	"k8s.io/kubernetes/pkg/util/netsh"
 )
@@ -47,12 +47,12 @@ type portal struct {
 type serviceInfo struct {
 	isAliveAtomic       int32 // Only access this with atomic ops
 	portal              portal
-	protocol            api.Protocol
+	protocol            v1.Protocol
 	socket              proxySocket
 	timeout             time.Duration
 	activeClients       *clientCache
 	dnsClients          *dnsClientCache
-	sessionAffinityType api.ServiceAffinity
+	sessionAffinityType v1.ServiceAffinity
 }
 
 func (info *serviceInfo) setAlive(b bool) {
@@ -70,7 +70,7 @@ func (info *serviceInfo) isAlive() bool {
 func logTimeout(err error) bool {
 	if e, ok := err.(net.Error); ok {
 		if e.Timeout() {
-			glog.V(3).Infof("connection to endpoint closed due to inactivity")
+			klog.V(3).Infof("connection to endpoint closed due to inactivity")
 			return true
 		}
 	}
@@ -100,7 +100,7 @@ var _ proxy.ProxyProvider = &Proxier{}
 type portMapKey struct {
 	ip       string
 	port     int
-	protocol api.Protocol
+	protocol v1.Protocol
 }
 
 func (k *portMapKey) String() string {
@@ -140,7 +140,7 @@ func NewProxier(loadBalancer LoadBalancer, listenIP net.IP, netsh netsh.Interfac
 		return nil, fmt.Errorf("failed to select a host interface: %v", err)
 	}
 
-	glog.V(2).Infof("Setting proxy IP to %v", hostIP)
+	klog.V(2).Infof("Setting proxy IP to %v", hostIP)
 	return createProxier(loadBalancer, listenIP, netsh, hostIP, syncPeriod, udpIdleTimeout)
 }
 
@@ -167,7 +167,7 @@ func (proxier *Proxier) SyncLoop() {
 	defer t.Stop()
 	for {
 		<-t.C
-		glog.V(6).Infof("Periodic sync")
+		klog.V(6).Infof("Periodic sync")
 		proxier.Sync()
 	}
 }
@@ -223,7 +223,7 @@ func (proxier *Proxier) setServiceInfo(service ServicePortPortalName, info *serv
 
 // addServicePortPortal starts listening for a new service, returning the serviceInfo.
 // The timeout only applies to UDP connections, for now.
-func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPortalName, protocol api.Protocol, listenIP string, port int, timeout time.Duration) (*serviceInfo, error) {
+func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPortalName, protocol v1.Protocol, listenIP string, port int, timeout time.Duration) (*serviceInfo, error) {
 	var serviceIP net.IP
 	if listenIP != allAvailableInterfaces {
 		if serviceIP = net.ParseIP(listenIP); serviceIP == nil {
@@ -234,7 +234,7 @@ func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPo
 		if existed, err := proxier.netsh.EnsureIPAddress(args, serviceIP); err != nil {
 			return nil, err
 		} else if !existed {
-			glog.V(3).Infof("Added ip address to fowarder interface for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol)
+			klog.V(3).Infof("Added ip address to fowarder interface for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol)
 		}
 	}
 
@@ -255,11 +255,11 @@ func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPo
 		timeout:             timeout,
 		activeClients:       newClientCache(),
 		dnsClients:          newDNSClientCache(),
-		sessionAffinityType: api.ServiceAffinityNone, // default
+		sessionAffinityType: v1.ServiceAffinityNone, // default
 	}
 	proxier.setServiceInfo(servicePortPortalName, si)
 
-	glog.V(2).Infof("Proxying for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol)
+	klog.V(2).Infof("Proxying for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol)
 	go func(service ServicePortPortalName, proxier *Proxier) {
 		defer runtime.HandleCrash()
 		atomic.AddInt32(&proxier.numProxyLoops, 1)
@@ -288,7 +288,7 @@ func (proxier *Proxier) closeServicePortPortal(servicePortPortalName ServicePort
 }
 
 // getListenIPPortMap returns a slice of all listen IPs for a service.
-func getListenIPPortMap(service *api.Service, listenPort int, nodePort int) map[string]int {
+func getListenIPPortMap(service *v1.Service, listenPort int, nodePort int) map[string]int {
 	listenIPPortMap := make(map[string]int)
 	listenIPPortMap[service.Spec.ClusterIP] = listenPort
 
@@ -307,13 +307,13 @@ func getListenIPPortMap(service *api.Service, listenPort int, nodePort int) map[
 	return listenIPPortMap
 }
 
-func (proxier *Proxier) mergeService(service *api.Service) map[ServicePortPortalName]bool {
+func (proxier *Proxier) mergeService(service *v1.Service) map[ServicePortPortalName]bool {
 	if service == nil {
 		return nil
 	}
 	svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
 	if !helper.IsServiceIPSet(service) {
-		glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
+		klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
 		return nil
 	}
 	existingPortPortals := make(map[ServicePortPortalName]bool)
@@ -337,19 +337,19 @@ func (proxier *Proxier) mergeService(service *api.Service) map[ServicePortPortal
 			continue
 		}
 		if exists {
-			glog.V(4).Infof("Something changed for service %q: stopping it", servicePortPortalName)
+			klog.V(4).Infof("Something changed for service %q: stopping it", servicePortPortalName)
 			if err := proxier.closeServicePortPortal(servicePortPortalName, info); err != nil {
-				glog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err)
+				klog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err)
 			}
 		}
-		glog.V(1).Infof("Adding new service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(listenPort)), protocol)
+		klog.V(1).Infof("Adding new service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(listenPort)), protocol)
 		info, err := proxier.addServicePortPortal(servicePortPortalName, protocol, listenIP, listenPort, proxier.udpIdleTimeout)
 		if err != nil {
-			glog.Errorf("Failed to start proxy for %q: %v", servicePortPortalName, err)
+			klog.Errorf("Failed to start proxy for %q: %v", servicePortPortalName, err)
 			continue
 		}
 		info.sessionAffinityType = service.Spec.SessionAffinity
-		glog.V(10).Infof("info: %#v", info)
+		klog.V(10).Infof("info: %#v", info)
 	}
 	if len(listenIPPortMap) > 0 {
 		// only one loadbalancer per service port portal
@@ -361,7 +361,7 @@ func (proxier *Proxier) mergeService(service *api.Service) map[ServicePortPortal
 				Port: servicePort.Name,
 			}
 			timeoutSeconds := 0
-			if service.Spec.SessionAffinity == api.ServiceAffinityClientIP {
+			if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP {
 				timeoutSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
 			}
 			proxier.loadBalancer.NewService(servicePortName, service.Spec.SessionAffinity, timeoutSeconds)
@@ -371,13 +371,13 @@ func (proxier *Proxier) mergeService(service *api.Service) map[ServicePortPortal
 	return existingPortPortals
 }
 
-func (proxier *Proxier) unmergeService(service *api.Service, existingPortPortals map[ServicePortPortalName]bool) {
+func (proxier *Proxier) unmergeService(service *v1.Service, existingPortPortals map[ServicePortPortalName]bool) {
 	if service == nil {
 		return
 	}
 	svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
 	if !helper.IsServiceIPSet(service) {
-		glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
+		klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
 		return
 	}
 
@@ -409,15 +409,15 @@ func (proxier *Proxier) unmergeService(service *api.Service, existingPortPortals
 			continue
 		}
 
-		glog.V(1).Infof("Stopping service %q", servicePortPortalName)
+		klog.V(1).Infof("Stopping service %q", servicePortPortalName)
 		info, exists := proxier.getServiceInfo(servicePortPortalName)
 		if !exists {
-			glog.Errorf("Service %q is being removed but doesn't exist", servicePortPortalName)
+			klog.Errorf("Service %q is being removed but doesn't exist", servicePortPortalName)
 			continue
 		}
 
 		if err := proxier.closeServicePortPortal(servicePortPortalName, info); err != nil {
-			glog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err)
+			klog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err)
 		}
 	}
 
@@ -428,23 +428,23 @@ func (proxier *Proxier) unmergeService(service *api.Service, existingPortPortals
 	}
 }
 
-func (proxier *Proxier) OnServiceAdd(service *api.Service) {
+func (proxier *Proxier) OnServiceAdd(service *v1.Service) {
 	_ = proxier.mergeService(service)
 }
 
-func (proxier *Proxier) OnServiceUpdate(oldService, service *api.Service) {
+func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) {
 	existingPortPortals := proxier.mergeService(service)
 	proxier.unmergeService(oldService, existingPortPortals)
 }
 
-func (proxier *Proxier) OnServiceDelete(service *api.Service) {
+func (proxier *Proxier) OnServiceDelete(service *v1.Service) {
 	proxier.unmergeService(service, map[ServicePortPortalName]bool{})
 }
 
 func (proxier *Proxier) OnServiceSynced() {
 }
 
-func sameConfig(info *serviceInfo, service *api.Service, protocol api.Protocol, listenPort int) bool {
+func sameConfig(info *serviceInfo, service *v1.Service, protocol v1.Protocol, listenPort int) bool {
 	return info.protocol == protocol && info.portal.port == listenPort && info.sessionAffinityType == service.Spec.SessionAffinity
 }
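All of the proxier.go changes are the same two substitutions applied mechanically; the control flow is untouched. For orientation, a hedged sketch of how a caller drives the updated handlers with the new v1 types (the proxier value and all field values are placeholders, not from this diff):

```go
// Hypothetical driver exercising the updated Proxier event handlers.
package driver

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/proxy/winuserspace"
)

func drive(p *winuserspace.Proxier) {
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "echo", Namespace: "default"},
		Spec: v1.ServiceSpec{
			ClusterIP:       "10.0.0.10", // placeholder cluster IP
			SessionAffinity: v1.ServiceAffinityNone,
			Ports: []v1.ServicePort{{
				Name:     "p",
				Port:     80,
				Protocol: v1.ProtocolTCP,
			}},
		},
	}
	p.OnServiceAdd(svc)         // mergeService: opens a portal per listen IP and port
	p.OnServiceUpdate(svc, svc) // merges the new spec, then unmerges the old one
	p.OnServiceDelete(svc)      // unmergeService: closes every portal for the service
}
```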
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier_test.go (generated, vendored, 180 lines changed)
@@ -29,10 +29,10 @@ import (
 	"testing"
 	"time"
 
+	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/runtime"
-	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/proxy"
 	netshtest "k8s.io/kubernetes/pkg/util/netsh/testing"
 )
@@ -241,11 +241,11 @@ func getPortNum(t *testing.T, addr string) int {
 func TestTCPProxy(t *testing.T) {
 	lb := NewLoadBalancerRR()
 	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
-	lb.OnEndpointsAdd(&api.Endpoints{
+	lb.OnEndpointsAdd(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Subsets: []api.EndpointSubset{{
-			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
-			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
+		Subsets: []v1.EndpointSubset{{
+			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
+			Ports:     []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
 		}},
 	})
 
@@ -268,11 +268,11 @@ func TestTCPProxy(t *testing.T) {
 func TestUDPProxy(t *testing.T) {
 	lb := NewLoadBalancerRR()
 	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
-	lb.OnEndpointsAdd(&api.Endpoints{
+	lb.OnEndpointsAdd(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Subsets: []api.EndpointSubset{{
-			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
-			Ports:     []api.EndpointPort{{Name: "p", Port: udpServerPort}},
+		Subsets: []v1.EndpointSubset{{
+			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
+			Ports:     []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
 		}},
 	})
 
@@ -295,11 +295,11 @@ func TestUDPProxy(t *testing.T) {
 func TestUDPProxyTimeout(t *testing.T) {
 	lb := NewLoadBalancerRR()
 	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
-	lb.OnEndpointsAdd(&api.Endpoints{
+	lb.OnEndpointsAdd(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Subsets: []api.EndpointSubset{{
-			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
-			Ports:     []api.EndpointPort{{Name: "p", Port: udpServerPort}},
+		Subsets: []v1.EndpointSubset{{
+			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
+			Ports:     []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
 		}},
 	})
 
@@ -327,18 +327,18 @@ func TestMultiPortProxy(t *testing.T) {
 	lb := NewLoadBalancerRR()
 	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-p"}, Port: "p"}
 	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-q"}, Port: "q"}
-	lb.OnEndpointsAdd(&api.Endpoints{
+	lb.OnEndpointsAdd(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
-		Subsets: []api.EndpointSubset{{
-			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
-			Ports:     []api.EndpointPort{{Name: "p", Protocol: "TCP", Port: tcpServerPort}},
+		Subsets: []v1.EndpointSubset{{
+			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
+			Ports:     []v1.EndpointPort{{Name: "p", Protocol: "TCP", Port: tcpServerPort}},
 		}},
 	})
-	lb.OnEndpointsAdd(&api.Endpoints{
+	lb.OnEndpointsAdd(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{Name: serviceQ.Name, Namespace: serviceQ.Namespace},
-		Subsets: []api.EndpointSubset{{
-			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
-			Ports:     []api.EndpointPort{{Name: "q", Protocol: "UDP", Port: udpServerPort}},
+		Subsets: []v1.EndpointSubset{{
+			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
+			Ports:     []v1.EndpointPort{{Name: "q", Protocol: "UDP", Port: udpServerPort}},
 		}},
 	})
 
@@ -379,9 +379,9 @@ func TestMultiPortOnServiceAdd(t *testing.T) {
 	}
 	waitForNumProxyLoops(t, p, 0)
 
-	p.OnServiceAdd(&api.Service{
+	p.OnServiceAdd(&v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
-		Spec: api.ServiceSpec{ClusterIP: "0.0.0.0", Ports: []api.ServicePort{{
+		Spec: v1.ServiceSpec{ClusterIP: "0.0.0.0", Ports: []v1.ServicePort{{
 			Name:     "p",
 			Port:     0,
 			Protocol: "TCP",
@@ -430,11 +430,11 @@ func stopProxyByName(proxier *Proxier, service ServicePortPortalName) error {
 func TestTCPProxyStop(t *testing.T) {
 	lb := NewLoadBalancerRR()
 	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
-	lb.OnEndpointsAdd(&api.Endpoints{
+	lb.OnEndpointsAdd(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
-		Subsets: []api.EndpointSubset{{
-			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
-			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
+		Subsets: []v1.EndpointSubset{{
+			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
+			Ports:     []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
 		}},
 	})
 
@@ -474,11 +474,11 @@ func TestTCPProxyStop(t *testing.T) {
 func TestUDPProxyStop(t *testing.T) {
 	lb := NewLoadBalancerRR()
 	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
-	lb.OnEndpointsAdd(&api.Endpoints{
+	lb.OnEndpointsAdd(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
-		Subsets: []api.EndpointSubset{{
-			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
-			Ports:     []api.EndpointPort{{Name: "p", Port: udpServerPort}},
+		Subsets: []v1.EndpointSubset{{
+			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
+			Ports:     []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
 		}},
 	})
 
@@ -512,11 +512,11 @@ func TestUDPProxyStop(t *testing.T) {
 func TestTCPProxyUpdateDelete(t *testing.T) {
 	lb := NewLoadBalancerRR()
 	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
-	lb.OnEndpointsAdd(&api.Endpoints{
+	lb.OnEndpointsAdd(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
-		Subsets: []api.EndpointSubset{{
-			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
-			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
+		Subsets: []v1.EndpointSubset{{
+			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
+			Ports:     []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
 		}},
 	})
 
@@ -540,9 +540,9 @@ func TestTCPProxyUpdateDelete(t *testing.T) {
 	conn.Close()
 	waitForNumProxyLoops(t, p, 1)
 
-	p.OnServiceDelete(&api.Service{
+	p.OnServiceDelete(&v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
+		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
 			Name:     "p",
 			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
 			Protocol: "TCP",
@@ -557,11 +557,11 @@ func TestTCPProxyUpdateDelete(t *testing.T) {
 func TestUDPProxyUpdateDelete(t *testing.T) {
 	lb := NewLoadBalancerRR()
 	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
-	lb.OnEndpointsAdd(&api.Endpoints{
+	lb.OnEndpointsAdd(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
-		Subsets: []api.EndpointSubset{{
-			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
-			Ports:     []api.EndpointPort{{Name: "p", Port: udpServerPort}},
+		Subsets: []v1.EndpointSubset{{
+			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
+			Ports:     []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
 		}},
 	})
 
@@ -584,9 +584,9 @@ func TestUDPProxyUpdateDelete(t *testing.T) {
 	conn.Close()
 	waitForNumProxyLoops(t, p, 1)
 
-	p.OnServiceDelete(&api.Service{
+	p.OnServiceDelete(&v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
+		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
 			Name:     "p",
 			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
 			Protocol: "UDP",
@@ -601,11 +601,11 @@ func TestUDPProxyUpdateDelete(t *testing.T) {
 func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
 	lb := NewLoadBalancerRR()
 	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
-	endpoint := &api.Endpoints{
+	endpoint := &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Subsets: []api.EndpointSubset{{
-			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
-			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
+		Subsets: []v1.EndpointSubset{{
+			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
+			Ports:     []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
 		}},
 	}
 	lb.OnEndpointsAdd(endpoint)
@@ -629,9 +629,9 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
 	conn.Close()
 	waitForNumProxyLoops(t, p, 1)
 
-	p.OnServiceDelete(&api.Service{
+	p.OnServiceDelete(&v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
+		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
 			Name:     "p",
 			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
 			Protocol: "TCP",
@@ -644,9 +644,9 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
 
 	// need to add endpoint here because it got clean up during service delete
 	lb.OnEndpointsAdd(endpoint)
-	p.OnServiceAdd(&api.Service{
+	p.OnServiceAdd(&v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
+		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
 			Name:     "p",
 			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
 			Protocol: "TCP",
@@ -663,11 +663,11 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
 func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
 	lb := NewLoadBalancerRR()
 	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
-	endpoint := &api.Endpoints{
+	endpoint := &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Subsets: []api.EndpointSubset{{
-			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
-			Ports:     []api.EndpointPort{{Name: "p", Port: udpServerPort}},
+		Subsets: []v1.EndpointSubset{{
+			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
+			Ports:     []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
 		}},
 	}
 	lb.OnEndpointsAdd(endpoint)
@@ -691,9 +691,9 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
 	conn.Close()
 	waitForNumProxyLoops(t, p, 1)
 
-	p.OnServiceDelete(&api.Service{
+	p.OnServiceDelete(&v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
+		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
 			Name:     "p",
 			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
 			Protocol: "UDP",
@@ -706,9 +706,9 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
 
 	// need to add endpoint here because it got clean up during service delete
 	lb.OnEndpointsAdd(endpoint)
-	p.OnServiceAdd(&api.Service{
+	p.OnServiceAdd(&v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
+		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
 			Name:     "p",
 			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
 			Protocol: "UDP",
@@ -725,11 +725,11 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
 func TestTCPProxyUpdatePort(t *testing.T) {
 	lb := NewLoadBalancerRR()
 	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
-	lb.OnEndpointsAdd(&api.Endpoints{
+	lb.OnEndpointsAdd(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Subsets: []api.EndpointSubset{{
-			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
-			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
+		Subsets: []v1.EndpointSubset{{
+			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
+			Ports:     []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
 		}},
 	})
 
@@ -748,9 +748,9 @@ func TestTCPProxyUpdatePort(t *testing.T) {
 	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
 	waitForNumProxyLoops(t, p, 1)
 
-	p.OnServiceAdd(&api.Service{
+	p.OnServiceAdd(&v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
+		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
 			Name:     "p",
 			Port:     0,
 			Protocol: "TCP",
@@ -773,11 +773,11 @@ func TestTCPProxyUpdatePort(t *testing.T) {
 func TestUDPProxyUpdatePort(t *testing.T) {
 	lb := NewLoadBalancerRR()
 	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
-	lb.OnEndpointsAdd(&api.Endpoints{
+	lb.OnEndpointsAdd(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Subsets: []api.EndpointSubset{{
-			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
-			Ports:     []api.EndpointPort{{Name: "p", Port: udpServerPort}},
+		Subsets: []v1.EndpointSubset{{
+			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
+			Ports:     []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
 		}},
 	})
 
@@ -795,9 +795,9 @@ func TestUDPProxyUpdatePort(t *testing.T) {
 	}
 	waitForNumProxyLoops(t, p, 1)
 
-	p.OnServiceAdd(&api.Service{
+	p.OnServiceAdd(&v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
+		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
 			Name:     "p",
 			Port:     0,
 			Protocol: "UDP",
@@ -818,11 +818,11 @@ func TestUDPProxyUpdatePort(t *testing.T) {
 func TestProxyUpdatePublicIPs(t *testing.T) {
 	lb := NewLoadBalancerRR()
 	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
-	lb.OnEndpointsAdd(&api.Endpoints{
+	lb.OnEndpointsAdd(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Subsets: []api.EndpointSubset{{
-			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
-			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
+		Subsets: []v1.EndpointSubset{{
+			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
+			Ports:     []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
 		}},
 	})
 
@@ -841,10 +841,10 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
 	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
 	waitForNumProxyLoops(t, p, 1)
 
-	p.OnServiceAdd(&api.Service{
+	p.OnServiceAdd(&v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Spec: api.ServiceSpec{
-			Ports: []api.ServicePort{{
+		Spec: v1.ServiceSpec{
+			Ports: []v1.ServicePort{{
 				Name:     "p",
 				Port:     int32(svcInfo.portal.port),
 				Protocol: "TCP",
@@ -870,11 +870,11 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
 func TestProxyUpdatePortal(t *testing.T) {
 	lb := NewLoadBalancerRR()
 	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
-	endpoint := &api.Endpoints{
+	endpoint := &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Subsets: []api.EndpointSubset{{
-			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
-			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
+		Subsets: []v1.EndpointSubset{{
+			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
+			Ports:     []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
 		}},
 	}
 	lb.OnEndpointsAdd(endpoint)
@@ -894,18 +894,18 @@ func TestProxyUpdatePortal(t *testing.T) {
 	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
 	waitForNumProxyLoops(t, p, 1)
 
-	svcv0 := &api.Service{
+	svcv0 := &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
+		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
 			Name:     "p",
 			Port:     int32(svcInfo.portal.port),
 			Protocol: "TCP",
 		}}},
 	}
 
-	svcv1 := &api.Service{
+	svcv1 := &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Spec: api.ServiceSpec{ClusterIP: "", Ports: []api.ServicePort{{
+		Spec: v1.ServiceSpec{ClusterIP: "", Ports: []v1.ServicePort{{
 			Name:     "p",
 			Port:     int32(svcInfo.portal.port),
 			Protocol: "TCP",
@@ -918,9 +918,9 @@ func TestProxyUpdatePortal(t *testing.T) {
 		t.Fatalf("service with empty ClusterIP should not be included in the proxy")
 	}
 
-	svcv2 := &api.Service{
+	svcv2 := &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Spec: api.ServiceSpec{ClusterIP: "None", Ports: []api.ServicePort{{
+		Spec: v1.ServiceSpec{ClusterIP: "None", Ports: []v1.ServicePort{{
 			Name:     "p",
 			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
 			Protocol: "TCP",
@@ -932,9 +932,9 @@ func TestProxyUpdatePortal(t *testing.T) {
 		t.Fatalf("service with 'None' as ClusterIP should not be included in the proxy")
 	}
 
-	svcv3 := &api.Service{
+	svcv3 := &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
+		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
 			Name:     "p",
 			Port:     int32(svcInfo.portal.port),
 			Protocol: "TCP",
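The test updates are the same type swap repeated for every fixture. The repetition suggests a small helper; a hypothetical sketch, not in the actual diff, that would build the fixture each test constructs inline (it assumes the test file's existing v1, metav1, and types imports):

```go
// makeEndpoints is a hypothetical test helper; the vendored tests instead
// repeat this composite literal in every test function.
func makeEndpoints(name types.NamespacedName, portName string, port int32) *v1.Endpoints {
	return &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: name.Name, Namespace: name.Namespace},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []v1.EndpointPort{{Name: portName, Port: port}},
		}},
	}
}

// usage: lb.OnEndpointsAdd(makeEndpoints(service.NamespacedName, "p", tcpServerPort))
```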
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxysocket.go (generated, vendored, 86 lines changed)
@@ -26,11 +26,11 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/golang/glog"
 	"github.com/miekg/dns"
+	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/runtime"
-	api "k8s.io/kubernetes/pkg/apis/core"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/proxy"
 	"k8s.io/kubernetes/pkg/util/ipconfig"
 	"k8s.io/utils/exec"
@@ -78,7 +78,7 @@ type proxySocket interface {
 	ListenPort() int
 }
 
-func newProxySocket(protocol api.Protocol, ip net.IP, port int) (proxySocket, error) {
+func newProxySocket(protocol v1.Protocol, ip net.IP, port int) (proxySocket, error) {
 	host := ""
 	if ip != nil {
 		host = ip.String()
@@ -101,6 +101,8 @@ func newProxySocket(protocol api.Protocol, ip net.IP, port int) (proxySocket, er
 			return nil, err
 		}
 		return &udpProxySocket{UDPConn: conn, port: port}, nil
+	case "SCTP":
+		return nil, fmt.Errorf("SCTP is not supported for user space proxy")
 	}
 	return nil, fmt.Errorf("unknown protocol %q", protocol)
 }
@@ -131,10 +133,10 @@ func tryConnect(service ServicePortPortalName, srcAddr net.Addr, protocol string
 		}
 		endpoint, err := proxier.loadBalancer.NextEndpoint(servicePortName, srcAddr, sessionAffinityReset)
 		if err != nil {
-			glog.Errorf("Couldn't find an endpoint for %s: %v", service, err)
+			klog.Errorf("Couldn't find an endpoint for %s: %v", service, err)
 			return nil, err
 		}
-		glog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint)
+		klog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint)
 		// TODO: This could spin up a new goroutine to make the outbound connection,
 		// and keep accepting inbound traffic.
 		outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout)
@@ -142,7 +144,7 @@ func tryConnect(service ServicePortPortalName, srcAddr net.Addr, protocol string
 			if isTooManyFDsError(err) {
 				panic("Dial failed: " + err.Error())
 			}
-			glog.Errorf("Dial failed: %v", err)
+			klog.Errorf("Dial failed: %v", err)
 			sessionAffinityReset = true
 			continue
 		}
@@ -171,13 +173,13 @@ func (tcp *tcpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv
 				// Then the service port was just closed so the accept failure is to be expected.
 				return
 			}
-			glog.Errorf("Accept failed: %v", err)
+			klog.Errorf("Accept failed: %v", err)
 			continue
 		}
-		glog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
+		klog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
 		outConn, err := tryConnect(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", proxier)
 		if err != nil {
-			glog.Errorf("Failed to connect to balancer: %v", err)
+			klog.Errorf("Failed to connect to balancer: %v", err)
 			inConn.Close()
 			continue
 		}
@@ -190,7 +192,7 @@ func (tcp *tcpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv
 func proxyTCP(in, out *net.TCPConn) {
 	var wg sync.WaitGroup
 	wg.Add(2)
-	glog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
+	klog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
 		in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
 	go copyBytes("from backend", in, out, &wg)
 	go copyBytes("to backend", out, in, &wg)
@@ -199,14 +201,14 @@ func proxyTCP(in, out *net.TCPConn) {
 
 func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) {
 	defer wg.Done()
-	glog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
+	klog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
 	n, err := io.Copy(dest, src)
 	if err != nil {
 		if !isClosedError(err) {
-			glog.Errorf("I/O error: %v", err)
+			klog.Errorf("I/O error: %v", err)
 		}
 	}
-	glog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
+	klog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
 	dest.Close()
 	src.Close()
 }
@@ -281,7 +283,7 @@ func appendDNSSuffix(msg *dns.Msg, buffer []byte, length int, dnsSuffix string)
 	msg.Question[0].Name = origName
 
 	if err != nil {
-		glog.Warningf("Unable to pack DNS packet. Error is: %v", err)
+		klog.Warningf("Unable to pack DNS packet. Error is: %v", err)
 		return length, err
 	}
 
@@ -308,7 +310,7 @@ func recoverDNSQuestion(origName string, msg *dns.Msg, buffer []byte, length int
 	mbuf, err := msg.PackBuffer(buffer)
 
 	if err != nil {
-		glog.Warningf("Unable to pack DNS packet. Error is: %v", err)
+		klog.Warningf("Unable to pack DNS packet. Error is: %v", err)
 		return length, err
 	}
 
@@ -328,7 +330,7 @@ func processUnpackedDNSQueryPacket(
 	length int,
 	dnsSearch []string) int {
 	if dnsSearch == nil || len(dnsSearch) == 0 {
-		glog.V(1).Infof("DNS search list is not initialized and is empty.")
+		klog.V(1).Infof("DNS search list is not initialized and is empty.")
 		return length
 	}
 
@@ -346,13 +348,13 @@ func processUnpackedDNSQueryPacket(
 	state.msg.MsgHdr.Id = msg.MsgHdr.Id
 
 	if index < 0 || index >= int32(len(dnsSearch)) {
-		glog.V(1).Infof("Search index %d is out of range.", index)
+		klog.V(1).Infof("Search index %d is out of range.", index)
 		return length
 	}
 
 	length, err := appendDNSSuffix(msg, buffer, length, dnsSearch[index])
 	if err != nil {
-		glog.Errorf("Append DNS suffix failed: %v", err)
+		klog.Errorf("Append DNS suffix failed: %v", err)
 	}
 
 	return length
@@ -371,7 +373,7 @@ func processUnpackedDNSResponsePacket(
 	var drop bool
 	var err error
 	if dnsSearch == nil || len(dnsSearch) == 0 {
-		glog.V(1).Infof("DNS search list is not initialized and is empty.")
+		klog.V(1).Infof("DNS search list is not initialized and is empty.")
 		return drop, length
 	}
 
@@ -387,19 +389,19 @@ func processUnpackedDNSResponsePacket(
 		drop = true
 		length, err = appendDNSSuffix(state.msg, buffer, length, dnsSearch[index])
 		if err != nil {
-			glog.Errorf("Append DNS suffix failed: %v", err)
+			klog.Errorf("Append DNS suffix failed: %v", err)
 		}
 
 		_, err = svrConn.Write(buffer[0:length])
 		if err != nil {
 			if !logTimeout(err) {
-				glog.Errorf("Write failed: %v", err)
+				klog.Errorf("Write failed: %v", err)
 			}
 		}
 	} else {
 		length, err = recoverDNSQuestion(state.msg.Question[0].Name, msg, buffer, length)
 		if err != nil {
-			glog.Errorf("Recover DNS question failed: %v", err)
+			klog.Errorf("Recover DNS question failed: %v", err)
 		}
 
 		dnsClients.mu.Lock()
@@ -419,7 +421,7 @@ func processDNSQueryPacket(
 	dnsSearch []string) (int, error) {
 	msg := &dns.Msg{}
 	if err := msg.Unpack(buffer[:length]); err != nil {
-		glog.Warningf("Unable to unpack DNS packet. Error is: %v", err)
+		klog.Warningf("Unable to unpack DNS packet. Error is: %v", err)
 		return length, err
 	}
 
@@ -430,14 +432,14 @@ func processDNSQueryPacket(
 
 	// QDCOUNT
 	if len(msg.Question) != 1 {
-		glog.V(1).Infof("Number of entries in the question section of the DNS packet is: %d", len(msg.Question))
-		glog.V(1).Infof("DNS suffix appending does not support more than one question.")
+		klog.V(1).Infof("Number of entries in the question section of the DNS packet is: %d", len(msg.Question))
+		klog.V(1).Infof("DNS suffix appending does not support more than one question.")
 		return length, nil
 	}
 
 	// ANCOUNT, NSCOUNT, ARCOUNT
 	if len(msg.Answer) != 0 || len(msg.Ns) != 0 || len(msg.Extra) != 0 {
-		glog.V(1).Infof("DNS packet contains more than question section.")
+		klog.V(1).Infof("DNS packet contains more than question section.")
 		return length, nil
 	}
 
@@ -446,7 +448,7 @@ func processDNSQueryPacket(
 	if packetRequiresDNSSuffix(dnsQType, dnsQClass) {
 		host, _, err := net.SplitHostPort(cliAddr.String())
 		if err != nil {
-			glog.V(1).Infof("Failed to get host from client address: %v", err)
+			klog.V(1).Infof("Failed to get host from client address: %v", err)
 			host = cliAddr.String()
 		}
 
@@ -466,7 +468,7 @@ func processDNSResponsePacket(
 	var drop bool
 	msg := &dns.Msg{}
 	if err := msg.Unpack(buffer[:length]); err != nil {
-		glog.Warningf("Unable to unpack DNS packet. Error is: %v", err)
+		klog.Warningf("Unable to unpack DNS packet. Error is: %v", err)
 		return drop, length, err
 	}
 
@@ -477,7 +479,7 @@ func processDNSResponsePacket(
 
 	// QDCOUNT
 	if len(msg.Question) != 1 {
-		glog.V(1).Infof("Number of entries in the response section of the DNS packet is: %d", len(msg.Answer))
+		klog.V(1).Infof("Number of entries in the response section of the DNS packet is: %d", len(msg.Answer))
 		return drop, length, nil
 	}
 
@@ -486,7 +488,7 @@ func processDNSResponsePacket(
 	if packetRequiresDNSSuffix(dnsQType, dnsQClass) {
 		host, _, err := net.SplitHostPort(cliAddr.String())
 		if err != nil {
-			glog.V(1).Infof("Failed to get host from client address: %v", err)
+			klog.V(1).Infof("Failed to get host from client address: %v", err)
 			host = cliAddr.String()
 		}
 
@@ -503,7 +505,7 @@ func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv
 	dnsSearch = []string{"", namespaceServiceDomain, serviceDomain, clusterDomain}
 	execer := exec.New()
 	ipconfigInterface := ipconfig.New(execer)
-	suffixList, err := ipconfigInterface.GetDnsSuffixSearchList()
+	suffixList, err := ipconfigInterface.GetDNSSuffixSearchList()
 	if err == nil {
 		for _, suffix := range suffixList {
 			dnsSearch = append(dnsSearch, suffix)
@@ -523,11 +525,11 @@ func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv
 		if err != nil {
 			if e, ok := err.(net.Error); ok {
 				if e.Temporary() {
-					glog.V(1).Infof("ReadFrom had a temporary failure: %v", err)
+					klog.V(1).Infof("ReadFrom had a temporary failure: %v", err)
 					continue
 				}
 			}
-			glog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err)
+			klog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err)
 			break
 		}
 
@@ -535,7 +537,7 @@ func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv
 		if isDNSService(service.Port) {
 			n, err = processDNSQueryPacket(myInfo.dnsClients, cliAddr, buffer[:], n, dnsSearch)
 			if err != nil {
-				glog.Errorf("Process DNS query packet failed: %v", err)
+				klog.Errorf("Process DNS query packet failed: %v", err)
 			}
 		}
 
@@ -549,14 +551,14 @@ func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv
 		_, err = svrConn.Write(buffer[0:n])
 		if err != nil {
 			if !logTimeout(err) {
-				glog.Errorf("Write failed: %v", err)
+				klog.Errorf("Write failed: %v", err)
 				// TODO: Maybe tear down the goroutine for this client/server pair?
 			}
 			continue
 		}
 		err = svrConn.SetDeadline(time.Now().Add(myInfo.timeout))
 		if err != nil {
-			glog.Errorf("SetDeadline failed: %v", err)
+			klog.Errorf("SetDeadline failed: %v", err)
 			continue
 		}
 	}
@@ -570,14 +572,14 @@ func (udp *udpProxySocket) getBackendConn(activeClients *clientCache, dnsClients
 	if !found {
 		// TODO: This could spin up a new goroutine to make the outbound connection,
 		// and keep accepting inbound traffic.
-		glog.V(3).Infof("New UDP connection from %s", cliAddr)
+		klog.V(3).Infof("New UDP connection from %s", cliAddr)
 		var err error
 		svrConn, err = tryConnect(service, cliAddr, "udp", proxier)
 		if err != nil {
 			return nil, err
 		}
 		if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil {
-			glog.Errorf("SetDeadline failed: %v", err)
+			klog.Errorf("SetDeadline failed: %v", err)
 			return nil, err
 		}
 		activeClients.clients[cliAddr.String()] = svrConn
@@ -598,7 +600,7 @@ func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activ
 		n, err := svrConn.Read(buffer[0:])
 		if err != nil {
 			if !logTimeout(err) {
-				glog.Errorf("Read failed: %v", err)
+				klog.Errorf("Read failed: %v", err)
 			}
 			break
 		}
@@ -607,20 +609,20 @@ func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activ
 		if isDNSService(service.Port) {
 			drop, n, err = processDNSResponsePacket(svrConn, dnsClients, cliAddr, buffer[:], n, dnsSearch)
 			if err != nil {
-				glog.Errorf("Process DNS response packet failed: %v", err)
+				klog.Errorf("Process DNS response packet failed: %v", err)
 			}
 		}
 
 		if !drop {
 			err = svrConn.SetDeadline(time.Now().Add(timeout))
 			if err != nil {
-				glog.Errorf("SetDeadline failed: %v", err)
+				klog.Errorf("SetDeadline failed: %v", err)
 				break
 			}
 			n, err = udp.WriteTo(buffer[0:n], cliAddr)
 			if err != nil {
 				if !logTimeout(err) {
-					glog.Errorf("WriteTo failed: %v", err)
+					klog.Errorf("WriteTo failed: %v", err)
 				}
 				break
 			}
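Besides the mechanical renames (and the GetDnsSuffixSearchList to GetDNSSuffixSearchList rename in the ipconfig interface), proxysocket.go gains one behavioral change: newProxySocket now rejects SCTP explicitly instead of falling through to "unknown protocol". A hedged usage sketch from inside the package, assuming its unexported API (port 0 asks the OS for a free port):

```go
// Illustrative only: exercise the protocol dispatch in newProxySocket.
for _, proto := range []v1.Protocol{v1.ProtocolTCP, v1.ProtocolUDP, v1.ProtocolSCTP} {
	sock, err := newProxySocket(proto, net.ParseIP("127.0.0.1"), 0)
	if err != nil {
		// SCTP now reports "not supported for user space proxy".
		klog.Errorf("%s: %v", proto, err)
		continue
	}
	klog.Infof("%s listening on port %d", proto, sock.ListenPort())
	sock.Close()
}
```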
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/roundrobin.go (generated, vendored, 54 lines changed)
@@ -25,9 +25,9 @@ import (
 	"sync"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
-	api "k8s.io/kubernetes/pkg/apis/core"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/proxy"
 	"k8s.io/kubernetes/pkg/util/slice"
 )
@@ -46,7 +46,7 @@ type affinityState struct {
 }
 
 type affinityPolicy struct {
-	affinityType api.ServiceAffinity
+	affinityType v1.ServiceAffinity
 	affinityMap  map[string]*affinityState // map client IP -> affinity info
 	ttlSeconds   int
 }
@@ -66,7 +66,7 @@ type balancerState struct {
 	affinity affinityPolicy
 }
 
-func newAffinityPolicy(affinityType api.ServiceAffinity, ttlSeconds int) *affinityPolicy {
+func newAffinityPolicy(affinityType v1.ServiceAffinity, ttlSeconds int) *affinityPolicy {
 	return &affinityPolicy{
 		affinityType: affinityType,
 		affinityMap:  make(map[string]*affinityState),
@@ -81,8 +81,8 @@ func NewLoadBalancerRR() *LoadBalancerRR {
 	}
 }
 
-func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlSeconds int) error {
-	glog.V(4).Infof("LoadBalancerRR NewService %q", svcPort)
+func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) error {
+	klog.V(4).Infof("LoadBalancerRR NewService %q", svcPort)
 	lb.lock.Lock()
 	defer lb.lock.Unlock()
 	lb.newServiceInternal(svcPort, affinityType, ttlSeconds)
@@ -90,14 +90,14 @@ func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType
 }
 
 // This assumes that lb.lock is already held.
-func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlSeconds int) *balancerState {
+func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) *balancerState {
 	if ttlSeconds == 0 {
-		ttlSeconds = int(api.DefaultClientIPServiceAffinitySeconds) //default to 3 hours if not specified. Should 0 be unlimited instead????
+		ttlSeconds = int(v1.DefaultClientIPServiceAffinitySeconds) //default to 3 hours if not specified. Should 0 be unlimited instead????
 	}
 
 	if _, exists := lb.services[svcPort]; !exists {
 		lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlSeconds)}
-		glog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort)
+		klog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort)
 	} else if affinityType != "" {
 		lb.services[svcPort].affinity.affinityType = affinityType
 	}
@@ -105,7 +105,7 @@ func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affi
 }
 
 func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
-	glog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort)
+	klog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort)
 	lb.lock.Lock()
 	defer lb.lock.Unlock()
 	delete(lb.services, svcPort)
@@ -114,7 +114,7 @@ func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
 // return true if this service is using some form of session affinity.
 func isSessionAffinity(affinity *affinityPolicy) bool {
 	// Should never be empty string, but checking for it to be safe.
-	if affinity.affinityType == "" || affinity.affinityType == api.ServiceAffinityNone {
+	if affinity.affinityType == "" || affinity.affinityType == v1.ServiceAffinityNone {
 		return false
 	}
 	return true
@@ -135,7 +135,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
 	if len(state.endpoints) == 0 {
 		return "", ErrMissingEndpoints
 	}
-	glog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints)
+	klog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints)
 
 	sessionAffinityEnabled := isSessionAffinity(&state.affinity)
 
@@ -153,7 +153,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
 			// Affinity wins.
 			endpoint := sessionAffinity.endpoint
 			sessionAffinity.lastUsed = time.Now()
-			glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint)
+			klog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint)
 			return endpoint, nil
 		}
 	}
@@ -172,7 +172,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
 		affinity.lastUsed = time.Now()
 		affinity.endpoint = endpoint
 		affinity.clientIP = ipaddr
-		glog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr])
+		klog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr])
 	}
 
 	return endpoint, nil
@@ -204,7 +204,7 @@ func flattenValidEndpoints(endpoints []hostPortPair) []string {
 func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) {
 	for _, affinity := range state.affinity.affinityMap {
 		if affinity.endpoint == endpoint {
-			glog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort)
+			klog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort)
 			delete(state.affinity.affinityMap, affinity.clientIP)
 		}
 	}
@@ -227,7 +227,7 @@ func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEn
 	}
 	for mKey, mVal := range allEndpoints {
 		if mVal == 1 {
-			glog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort)
+			klog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort)
 			removeSessionAffinityByEndpoint(state, svcPort, mKey)
 		}
 	}
@@ -235,7 +235,7 @@ func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEn
 
 // buildPortsToEndpointsMap builds a map of portname -> all ip:ports for that
 // portname. Explode Endpoints.Subsets[*] into this structure.
-func buildPortsToEndpointsMap(endpoints *api.Endpoints) map[string][]hostPortPair {
+func buildPortsToEndpointsMap(endpoints *v1.Endpoints) map[string][]hostPortPair {
 	portsToEndpoints := map[string][]hostPortPair{}
 	for i := range endpoints.Subsets {
 		ss := &endpoints.Subsets[i]
@@ -251,7 +251,7 @@ func buildPortsToEndpointsMap(endpoints *api.Endpoints) map[string][]hostPortPai
 	return portsToEndpoints
 }
 
-func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *api.Endpoints) {
+func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *v1.Endpoints) {
 	portsToEndpoints := buildPortsToEndpointsMap(endpoints)
 
 	lb.lock.Lock()
@@ -263,13 +263,13 @@ func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *api.Endpoints) {
 		state, exists := lb.services[svcPort]
 
 		if !exists || state == nil || len(newEndpoints) > 0 {
-			glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
+			klog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
 			lb.updateAffinityMap(svcPort, newEndpoints)
 			// OnEndpointsAdd can be called without NewService being called externally.
 			// To be safe we will call it here. A new service will only be created
 			// if one does not already exist. The affinity will be updated
 			// later, once NewService is called.
-			state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
+			state = lb.newServiceInternal(svcPort, v1.ServiceAffinity(""), 0)
 			state.endpoints = slice.ShuffleStrings(newEndpoints)

 			// Reset the round-robin index.
@@ -278,7 +278,7 @@ func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *api.Endpoints) {
 	}
 }
 
-func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
+func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) {
 	portsToEndpoints := buildPortsToEndpointsMap(endpoints)
 	oldPortsToEndpoints := buildPortsToEndpointsMap(oldEndpoints)
 	registeredEndpoints := make(map[proxy.ServicePortName]bool)
@@ -297,13 +297,13 @@ func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoin
 		}
 
 		if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints) {
-			glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
+			klog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
 			lb.updateAffinityMap(svcPort, newEndpoints)
 			// OnEndpointsUpdate can be called without NewService being called externally.
 			// To be safe we will call it here. A new service will only be created
 			// if one does not already exist. The affinity will be updated
 			// later, once NewService is called.
-			state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
+			state = lb.newServiceInternal(svcPort, v1.ServiceAffinity(""), 0)
 			state.endpoints = slice.ShuffleStrings(newEndpoints)
 
 			// Reset the round-robin index.
@@ -315,7 +315,7 @@ func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoin
 	for portname := range oldPortsToEndpoints {
 		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
 		if _, exists := registeredEndpoints[svcPort]; !exists {
-			glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
+			klog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
 			// Reset but don't delete.
 			state := lb.services[svcPort]
 			state.endpoints = []string{}
@@ -325,7 +325,7 @@ func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoin
 		}
 	}
 }
 
-func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *api.Endpoints) {
+func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *v1.Endpoints) {
 	portsToEndpoints := buildPortsToEndpointsMap(endpoints)
 
 	lb.lock.Lock()
@@ -333,7 +333,7 @@ func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *api.Endpoints) {
 
 	for portname := range portsToEndpoints {
 		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
-		glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
|
||||
klog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
|
||||
// If the service is still around, reset but don't delete.
|
||||
if state, ok := lb.services[svcPort]; ok {
|
||||
state.endpoints = []string{}
|
||||
@ -367,7 +367,7 @@ func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortNa
|
||||
}
|
||||
for ip, affinity := range state.affinity.affinityMap {
|
||||
if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
|
||||
glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
|
||||
klog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
|
||||
delete(state.affinity.affinityMap, ip)
|
||||
}
|
||||
}
|
||||
|
176
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/roundrobin_test.go
generated
vendored
176
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/roundrobin_test.go
generated
vendored
@ -20,9 +20,9 @@ import (
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/proxy"
|
||||
)
|
||||
|
||||
@ -104,11 +104,11 @@ func TestLoadBalanceWorksWithSingleEndpoint(t *testing.T) {
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
endpoints := &api.Endpoints{
|
||||
endpoints := &v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 40}},
|
||||
Subsets: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "endpoint1"}},
|
||||
Ports: []v1.EndpointPort{{Name: "p", Port: 40}},
|
||||
}},
|
||||
}
|
||||
loadBalancer.OnEndpointsAdd(endpoints)
|
||||
@ -141,11 +141,11 @@ func TestLoadBalanceWorksWithMultipleEndpoints(t *testing.T) {
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
endpoints := &api.Endpoints{
|
||||
endpoints := &v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}},
|
||||
Subsets: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}},
|
||||
}},
|
||||
}
|
||||
loadBalancer.OnEndpointsAdd(endpoints)
|
||||
@ -168,16 +168,16 @@ func TestLoadBalanceWorksWithMultipleEndpointsMultiplePorts(t *testing.T) {
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
endpoints := &api.Endpoints{
|
||||
endpoints := &v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}},
|
||||
Addresses: []v1.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}},
|
||||
Ports: []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}},
|
||||
},
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}},
|
||||
Addresses: []v1.EndpointAddress{{IP: "endpoint3"}},
|
||||
Ports: []v1.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -210,20 +210,20 @@ func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
endpointsv1 := &api.Endpoints{
|
||||
endpointsv1 := &v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}},
|
||||
Addresses: []v1.EndpointAddress{{IP: "endpoint1"}},
|
||||
Ports: []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}},
|
||||
},
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint2"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}},
|
||||
Addresses: []v1.EndpointAddress{{IP: "endpoint2"}},
|
||||
Ports: []v1.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}},
|
||||
},
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}},
|
||||
Addresses: []v1.EndpointAddress{{IP: "endpoint3"}},
|
||||
Ports: []v1.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -249,16 +249,16 @@ func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
|
||||
|
||||
// Then update the configuration with one fewer endpoints, make sure
|
||||
// we start in the beginning again
|
||||
endpointsv2 := &api.Endpoints{
|
||||
endpointsv2 := &v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint4"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}},
|
||||
Addresses: []v1.EndpointAddress{{IP: "endpoint4"}},
|
||||
Ports: []v1.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}},
|
||||
},
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint5"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}},
|
||||
Addresses: []v1.EndpointAddress{{IP: "endpoint5"}},
|
||||
Ports: []v1.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -283,7 +283,7 @@ func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
|
||||
|
||||
// Clear endpoints
|
||||
endpointsv3 := &api.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil}
|
||||
endpointsv3 := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil}
|
||||
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
|
||||
|
||||
endpoint, err = loadBalancer.NextEndpoint(serviceP, nil, false)
|
||||
@ -300,21 +300,21 @@ func TestLoadBalanceWorksWithServiceRemoval(t *testing.T) {
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
endpoints1 := &api.Endpoints{
|
||||
endpoints1 := &v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: fooServiceP.Name, Namespace: fooServiceP.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 123}},
|
||||
Addresses: []v1.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}},
|
||||
Ports: []v1.EndpointPort{{Name: "p", Port: 123}},
|
||||
},
|
||||
},
|
||||
}
|
||||
endpoints2 := &api.Endpoints{
|
||||
endpoints2 := &v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: barServiceP.Name, Namespace: barServiceP.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 456}},
|
||||
Addresses: []v1.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}},
|
||||
Ports: []v1.EndpointPort{{Name: "p", Port: 456}},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -357,13 +357,13 @@ func TestStickyLoadBalanceWorksWithNewServiceCalledFirst(t *testing.T) {
|
||||
}
|
||||
|
||||
// Call NewService() before OnEndpointsUpdate()
|
||||
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
|
||||
endpoints := &api.Endpoints{
|
||||
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
|
||||
endpoints := &v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
|
||||
{Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
|
||||
{Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, Ports: []v1.EndpointPort{{Port: 3}}},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsAdd(endpoints)
|
||||
@ -413,15 +413,15 @@ func TestStickyLoadBalanceWorksWithNewServiceCalledSecond(t *testing.T) {
|
||||
}
|
||||
|
||||
// Call OnEndpointsUpdate() before NewService()
|
||||
endpoints := &api.Endpoints{
|
||||
endpoints := &v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
|
||||
{Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsAdd(endpoints)
|
||||
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
|
||||
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
|
||||
|
||||
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
|
||||
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
|
||||
@ -473,13 +473,13 @@ func TestStickyLoadBalanaceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
|
||||
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
|
||||
endpointsv1 := &api.Endpoints{
|
||||
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
|
||||
endpointsv1 := &v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
|
||||
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -494,12 +494,12 @@ func TestStickyLoadBalanaceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
|
||||
client3Endpoint := shuffledEndpoints[2]
|
||||
|
||||
endpointsv2 := &api.Endpoints{
|
||||
endpointsv2 := &v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}},
|
||||
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -516,12 +516,12 @@ func TestStickyLoadBalanaceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
|
||||
expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
|
||||
expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
|
||||
|
||||
endpointsv3 := &api.Endpoints{
|
||||
endpointsv3 := &v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}},
|
||||
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -546,13 +546,13 @@ func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
|
||||
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
|
||||
endpointsv1 := &api.Endpoints{
|
||||
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
|
||||
endpointsv1 := &v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
|
||||
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -567,12 +567,12 @@ func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
||||
// Then update the configuration with one fewer endpoints, make sure
|
||||
// we start in the beginning again
|
||||
endpointsv2 := &api.Endpoints{
|
||||
endpointsv2 := &v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 4}, {Port: 5}},
|
||||
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []v1.EndpointPort{{Port: 4}, {Port: 5}},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -586,7 +586,7 @@ func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
||||
|
||||
// Clear endpoints
|
||||
endpointsv3 := &api.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil}
|
||||
endpointsv3 := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil}
|
||||
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
|
||||
|
||||
endpoint, err = loadBalancer.NextEndpoint(service, nil, false)
|
||||
@ -605,24 +605,24 @@ func TestStickyLoadBalanceWorksWithServiceRemoval(t *testing.T) {
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
loadBalancer.NewService(fooService, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
|
||||
endpoints1 := &api.Endpoints{
|
||||
loadBalancer.NewService(fooService, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
|
||||
endpoints1 := &v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: fooService.Name, Namespace: fooService.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
|
||||
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
|
||||
},
|
||||
},
|
||||
}
|
||||
barService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: ""}
|
||||
loadBalancer.NewService(barService, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
|
||||
endpoints2 := &api.Endpoints{
|
||||
loadBalancer.NewService(barService, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
|
||||
endpoints2 := &v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: barService.Name, Namespace: barService.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 4}, {Port: 5}},
|
||||
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []v1.EndpointPort{{Port: 4}, {Port: 5}},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -674,13 +674,13 @@ func TestStickyLoadBalanceWorksWithEndpointFails(t *testing.T) {
|
||||
}
|
||||
|
||||
// Call NewService() before OnEndpointsUpdate()
|
||||
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
|
||||
endpoints := &api.Endpoints{
|
||||
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
|
||||
endpoints := &v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
|
||||
{Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
|
||||
{Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, Ports: []v1.EndpointPort{{Port: 3}}},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsAdd(endpoints)
|
||||
|
Reference in New Issue
Block a user