vendor cleanup: remove unused, non-go and test files

This commit is contained in:
Madhu Rajanna
2019-01-16 00:05:52 +05:30
parent 52cf4aa902
commit b10ba188e7
15421 changed files with 17 additions and 4208853 deletions

View File

@@ -1,70 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"endpoints.go",
"service.go",
"types.go",
],
importpath = "k8s.io/kubernetes/pkg/proxy",
deps = [
"//pkg/api/service:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/proxy/util:go_default_library",
"//pkg/util/net:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/proxy/apis/kubeproxyconfig:all-srcs",
"//pkg/proxy/config:all-srcs",
"//pkg/proxy/healthcheck:all-srcs",
"//pkg/proxy/iptables:all-srcs",
"//pkg/proxy/ipvs:all-srcs",
"//pkg/proxy/metrics:all-srcs",
"//pkg/proxy/userspace:all-srcs",
"//pkg/proxy/util:all-srcs",
"//pkg/proxy/winkernel:all-srcs",
"//pkg/proxy/winuserspace:all-srcs",
],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = [
"endpoints_test.go",
"service_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)

View File

@@ -1,14 +0,0 @@
approvers:
- thockin
- matchstick
reviewers:
- thockin
- lavalamp
- smarterclayton
- brendandburns
- vishh
- justinsb
- freehan
- dcbw
- m1093782566
- danwinship

View File

@@ -1,41 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"register.go",
"types.go",
"zz_generated.deepcopy.go",
],
importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig",
deps = [
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/proxy/apis/kubeproxyconfig/fuzzer:all-srcs",
"//pkg/proxy/apis/kubeproxyconfig/scheme:all-srcs",
"//pkg/proxy/apis/kubeproxyconfig/v1alpha1:all-srcs",
"//pkg/proxy/apis/kubeproxyconfig/validation:all-srcs",
],
tags = ["automanaged"],
)

View File

@@ -1,4 +0,0 @@
approvers:
- thockin
reviewers:
- sig-network-reviewers

View File

@@ -1,19 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
package kubeproxyconfig // import "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"

View File

@@ -1,29 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["fuzzer.go"],
importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/fuzzer",
visibility = ["//visibility:public"],
deps = [
"//pkg/proxy/apis/kubeproxyconfig:go_default_library",
"//pkg/util/pointer:go_default_library",
"//vendor/github.com/google/gofuzz:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -1,52 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"fmt"
"time"
"github.com/google/gofuzz"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
)
// Funcs returns the fuzzer functions for the kube-proxy apis.
func Funcs(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(obj *kubeproxyconfig.KubeProxyConfiguration, c fuzz.Continue) {
c.FuzzNoCustom(obj)
obj.BindAddress = fmt.Sprintf("%d.%d.%d.%d", c.Intn(256), c.Intn(256), c.Intn(256), c.Intn(256))
obj.ClientConnection.ContentType = c.RandString()
obj.Conntrack.MaxPerCore = utilpointer.Int32Ptr(c.Int31())
obj.Conntrack.Min = utilpointer.Int32Ptr(c.Int31())
obj.Conntrack.TCPCloseWaitTimeout = &metav1.Duration{Duration: time.Duration(c.Int63()) * time.Hour}
obj.Conntrack.TCPEstablishedTimeout = &metav1.Duration{Duration: time.Duration(c.Int63()) * time.Hour}
obj.FeatureGates = map[string]bool{c.RandString(): true}
obj.HealthzBindAddress = fmt.Sprintf("%d.%d.%d.%d:%d", c.Intn(256), c.Intn(256), c.Intn(256), c.Intn(256), c.Intn(65536))
obj.IPTables.MasqueradeBit = utilpointer.Int32Ptr(c.Int31())
obj.MetricsBindAddress = fmt.Sprintf("%d.%d.%d.%d:%d", c.Intn(256), c.Intn(256), c.Intn(256), c.Intn(256), c.Intn(65536))
obj.OOMScoreAdj = utilpointer.Int32Ptr(c.Int31())
obj.ResourceContainer = "foo"
obj.ClientConnection.ContentType = "bar"
obj.NodePortAddresses = []string{"1.2.3.0/24"}
},
}
}

View File

@@ -1,51 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubeproxyconfig
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
// GroupName is the group name used in this package
const GroupName = "kubeproxy.config.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
func addKnownTypes(scheme *runtime.Scheme) error {
// TODO this will get cleaned up when the scheme types are fixed
scheme.AddKnownTypes(SchemeGroupVersion,
&KubeProxyConfiguration{},
)
return nil
}
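
As a usage note (not part of this commit), a minimal sketch of what the Kind and Resource helpers above produce; the plural resource name below is an illustrative assumption:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
)

func main() {
	// A GroupKind qualified with "kubeproxy.config.k8s.io".
	fmt.Println(kubeproxyconfig.Kind("KubeProxyConfiguration"))
	// Output: KubeProxyConfiguration.kubeproxy.config.k8s.io

	// A GroupResource qualified the same way; the plural name is assumed.
	fmt.Println(kubeproxyconfig.Resource("kubeproxyconfigurations"))
	// Output: kubeproxyconfigurations.kubeproxy.config.k8s.io
}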

View File

@@ -1,38 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["scheme.go"],
importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme",
visibility = ["//visibility:public"],
deps = [
"//pkg/proxy/apis/kubeproxyconfig:go_default_library",
"//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["scheme_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/proxy/apis/kubeproxyconfig/fuzzer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library",
],
)

View File

@@ -1,42 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheme
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
)
var (
// Scheme defines methods for serializing and deserializing API objects.
Scheme = runtime.NewScheme()
// Codecs provides methods for retrieving codecs and serializers for specific
// versions and content types.
Codecs = serializer.NewCodecFactory(Scheme)
)
func init() {
AddToScheme(Scheme)
}
// AddToScheme adds the types of this group into the given scheme.
func AddToScheme(scheme *runtime.Scheme) {
v1alpha1.AddToScheme(scheme)
kubeproxyconfig.AddToScheme(scheme)
}
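
A minimal sketch (not part of this commit) of how Scheme and Codecs are typically consumed, assuming the vendored import paths shown in the BUILD files; the JSON literal is illustrative. With no arguments, Codecs.UniversalDecoder() decodes and converts into the internal version:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme"
)

func main() {
	data := []byte(`{"apiVersion":"kubeproxy.config.k8s.io/v1alpha1","kind":"KubeProxyConfiguration","bindAddress":"10.0.0.1"}`)

	// Decode the versioned bytes and convert to the internal type.
	obj, err := runtime.Decode(scheme.Codecs.UniversalDecoder(), data)
	if err != nil {
		panic(err)
	}
	cfg := obj.(*kubeproxyconfig.KubeProxyConfiguration)
	fmt.Println(cfg.BindAddress) // 10.0.0.1
}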

View File

@@ -1,28 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheme
import (
"testing"
"k8s.io/apimachinery/pkg/api/testing/roundtrip"
"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/fuzzer"
)
func TestRoundTripTypes(t *testing.T) {
roundtrip.RoundTripTestForScheme(t, Scheme, fuzzer.Funcs)
}

View File

@@ -1,268 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubeproxyconfig
import (
"fmt"
"sort"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ClientConnectionConfiguration contains details for constructing a client.
type ClientConnectionConfiguration struct {
// kubeconfig is the path to a kubeconfig file.
KubeConfigFile string
// acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the
// default value of 'application/json'. This field will control all connections to the server used by a particular
// client.
AcceptContentTypes string
// contentType is the content type used when sending data to the server from this client.
ContentType string
// qps controls the number of queries per second allowed for this connection.
QPS float32
// burst allows extra queries to accumulate when a client is exceeding its rate.
Burst int32
}
// KubeProxyIPTablesConfiguration contains iptables-related configuration
// details for the Kubernetes proxy server.
type KubeProxyIPTablesConfiguration struct {
// masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using
// the pure iptables proxy mode. Values must be within the range [0, 31].
MasqueradeBit *int32
// masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.
MasqueradeAll bool
// syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m',
// '2h22m'). Must be greater than 0.
SyncPeriod metav1.Duration
// minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m',
// '2h22m').
MinSyncPeriod metav1.Duration
}
// KubeProxyIPVSConfiguration contains ipvs-related configuration
// details for the Kubernetes proxy server.
type KubeProxyIPVSConfiguration struct {
// syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m',
// '2h22m'). Must be greater than 0.
SyncPeriod metav1.Duration
// minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m',
// '2h22m').
MinSyncPeriod metav1.Duration
// ipvs scheduler
Scheduler string
// excludeCIDRs is a list of CIDRs which the ipvs proxier should not touch
// when cleaning up ipvs services.
ExcludeCIDRs []string
}
// KubeProxyConntrackConfiguration contains conntrack settings for
// the Kubernetes proxy server.
type KubeProxyConntrackConfiguration struct {
// max is the maximum number of NAT connections to track (0 to
// leave as-is). This takes precedence over maxPerCore and min.
Max *int32
// maxPerCore is the maximum number of NAT connections to track
// per CPU core (0 to leave the limit as-is and ignore min).
MaxPerCore *int32
// min is the minimum number of connection-tracking records to allocate,
// regardless of maxPerCore (set maxPerCore=0 to leave the limit as-is).
Min *int32
// tcpEstablishedTimeout is how long an idle TCP connection will be kept open
// (e.g. '2s'). Must be greater than 0 to set.
TCPEstablishedTimeout *metav1.Duration
// tcpCloseWaitTimeout is how long an idle conntrack entry
// in CLOSE_WAIT state will remain in the conntrack
// table. (e.g. '60s'). Must be greater than 0 to set.
TCPCloseWaitTimeout *metav1.Duration
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// KubeProxyConfiguration contains everything necessary to configure the
// Kubernetes proxy server.
type KubeProxyConfiguration struct {
metav1.TypeMeta
// featureGates is a map of feature names to bools that enable or disable alpha/experimental features.
FeatureGates map[string]bool
// bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0
// for all interfaces)
BindAddress string
// healthzBindAddress is the IP address and port for the health check server to serve on,
// defaulting to 0.0.0.0:10256
HealthzBindAddress string
// metricsBindAddress is the IP address and port for the metrics server to serve on,
// defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces)
MetricsBindAddress string
// enableProfiling enables profiling via web interface on /debug/pprof handler.
// Profiling handlers will be handled by metrics server.
EnableProfiling bool
// clusterCIDR is the CIDR range of the pods in the cluster. It is used to
// bridge traffic coming from outside of the cluster. If not provided,
// no off-cluster bridging will be performed.
ClusterCIDR string
// hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.
HostnameOverride string
// clientConnection specifies the kubeconfig file and client connection settings for the proxy
// server to use when communicating with the apiserver.
ClientConnection ClientConnectionConfiguration
// iptables contains iptables-related configuration options.
IPTables KubeProxyIPTablesConfiguration
// ipvs contains ipvs-related configuration options.
IPVS KubeProxyIPVSConfiguration
// oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within
// the range [-1000, 1000]
OOMScoreAdj *int32
// mode specifies which proxy mode to use.
Mode ProxyMode
// portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
// in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
PortRange string
// resourceContainer is the absolute name of the resource-only container to create and run
// the Kube-proxy in (Default: /kube-proxy).
ResourceContainer string
// udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
// Must be greater than 0. Only applicable for proxyMode=userspace.
UDPIdleTimeout metav1.Duration
// conntrack contains conntrack-related configuration options.
Conntrack KubeProxyConntrackConfiguration
// configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater
// than 0.
ConfigSyncPeriod metav1.Duration
// nodePortAddresses is the --nodeport-addresses value for the kube-proxy process. Values must be valid
// IP blocks. These values are used as a parameter to select the interfaces where NodePort works.
// For example, to expose a service on localhost for local visits and on some other interfaces for a
// particular purpose, a list of IP blocks would do that.
// If set to "127.0.0.0/8", kube-proxy will only select the loopback interface for NodePort.
// If set to a non-zero IP block, kube-proxy will filter it down to just the IPs that apply to the node.
// An empty string slice is meant to select all network interfaces.
NodePortAddresses []string
}
// Currently, three proxy modes are available on Linux: 'userspace' (older, going to be EOL), 'iptables'
// (newer, faster), and 'ipvs' (newest, better in performance and scalability).
//
// Two proxy modes are available on Windows: 'userspace' (older, stable) and 'kernelspace' (newer, faster).
//
// On Linux, if the proxy mode is blank, the best-available proxy is used (currently iptables, but this may
// change in the future). If the iptables proxy is selected, regardless of how, and the system's kernel or
// iptables versions are insufficient, this always falls back to the userspace proxy. IPVS mode is enabled
// when the proxy mode is set to 'ipvs', and its fallback path is first iptables and then userspace.
// On Windows, if the proxy mode is blank, the best-available proxy is used (currently userspace, but this
// may change in the future). If the winkernel proxy is selected, regardless of how, and the Windows kernel
// cannot support this mode of proxy, this always falls back to the userspace proxy.
type ProxyMode string
const (
ProxyModeUserspace ProxyMode = "userspace"
ProxyModeIPTables ProxyMode = "iptables"
ProxyModeIPVS ProxyMode = "ipvs"
ProxyModeKernelspace ProxyMode = "kernelspace"
)
// IPVSSchedulerMethod is the algorithm for allocating TCP connections and
// UDP datagrams to real servers. Scheduling algorithms are implemented
// as kernel modules. Ten are shipped with the Linux Virtual Server.
type IPVSSchedulerMethod string
const (
// RoundRobin distributes jobs equally amongst the available real servers.
RoundRobin IPVSSchedulerMethod = "rr"
// WeightedRoundRobin assigns jobs to real servers proportionally to the real servers' weight.
// Servers with higher weights receive new jobs first and get more jobs than servers with lower weights.
// Servers with equal weights get an equal distribution of new jobs.
WeightedRoundRobin IPVSSchedulerMethod = "wrr"
// LeastConnection assigns more jobs to real servers with fewer active jobs.
LeastConnection IPVSSchedulerMethod = "lc"
// WeightedLeastConnection assigns more jobs to servers with fewer jobs,
// relative to the real servers' weight (Ci/Wi).
WeightedLeastConnection IPVSSchedulerMethod = "wlc"
// LocalityBasedLeastConnection assigns jobs destined for the same IP address to the same server if
// the server is not overloaded and available; otherwise assigns jobs to servers with fewer jobs,
// and keeps it for future assignment.
LocalityBasedLeastConnection IPVSSchedulerMethod = "lblc"
// LocalityBasedLeastConnectionWithReplication assigns jobs destined for the same IP address to the
// least-connection node in the server set for the IP address. If all the nodes in the server set are overloaded,
// it picks up a node with fewer jobs in the cluster and adds it to the server set for the target.
// If the server set has not been modified for the specified time, the most loaded node is removed from the server set,
// in order to avoid high degree of replication.
LocalityBasedLeastConnectionWithReplication IPVSSchedulerMethod = "lblcr"
// SourceHashing assigns jobs to servers through looking up a statically assigned hash table
// by their source IP addresses.
SourceHashing IPVSSchedulerMethod = "sh"
// DestinationHashing assigns jobs to servers through looking up a statically assigned hash table
// by their destination IP addresses.
DestinationHashing IPVSSchedulerMethod = "dh"
// ShortestExpectedDelay assigns an incoming job to the server with the shortest expected delay.
// The expected delay that the job will experience is (Ci + 1) / Ui if sent to the ith server, in which
// Ci is the number of jobs on the ith server and Ui is the fixed service rate (weight) of the ith server.
ShortestExpectedDelay IPVSSchedulerMethod = "sed"
// NeverQueue assigns an incoming job to an idle server if there is one, instead of waiting for a fast one;
// if all the servers are busy, it adopts the ShortestExpectedDelay policy to assign the job.
NeverQueue IPVSSchedulerMethod = "nq"
)
func (m *ProxyMode) Set(s string) error {
*m = ProxyMode(s)
return nil
}
func (m *ProxyMode) String() string {
if m != nil {
return string(*m)
}
return ""
}
func (m *ProxyMode) Type() string {
return "ProxyMode"
}
type ConfigurationMap map[string]string
func (m *ConfigurationMap) String() string {
pairs := []string{}
for k, v := range *m {
pairs = append(pairs, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(pairs)
return strings.Join(pairs, ",")
}
func (m *ConfigurationMap) Set(value string) error {
for _, s := range strings.Split(value, ",") {
if len(s) == 0 {
continue
}
arr := strings.SplitN(s, "=", 2)
if len(arr) == 2 {
(*m)[strings.TrimSpace(arr[0])] = strings.TrimSpace(arr[1])
} else {
(*m)[strings.TrimSpace(arr[0])] = ""
}
}
return nil
}
func (*ConfigurationMap) Type() string {
return "mapStringString"
}
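
The Set, String, and Type methods above are what make *ProxyMode and *ConfigurationMap satisfy the pflag.Value interface, so both can be bound directly to command-line flags. A minimal sketch under that assumption; the flag names are illustrative:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
)

func main() {
	var mode kubeproxyconfig.ProxyMode
	cfg := kubeproxyconfig.ConfigurationMap{} // must be non-nil before Set is called

	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	fs.Var(&mode, "proxy-mode", "Which proxy mode to use.")
	fs.Var(&cfg, "extra-config", "Comma-separated key=value pairs.")

	if err := fs.Parse([]string{"--proxy-mode=ipvs", "--extra-config=b=2,a=1"}); err != nil {
		panic(err)
	}
	fmt.Println(mode)         // ipvs
	fmt.Println(cfg.String()) // a=1,b=2 (String sorts the pairs)
}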

View File

@@ -1,43 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"defaults.go",
"doc.go",
"register.go",
"types.go",
"zz_generated.conversion.go",
"zz_generated.deepcopy.go",
"zz_generated.defaults.go",
],
importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1",
deps = [
"//pkg/kubelet/qos:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/proxy/apis/kubeproxyconfig:go_default_library",
"//pkg/util/pointer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -1,122 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"fmt"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/util/pointer"
)
func addDefaultingFuncs(scheme *kruntime.Scheme) error {
return RegisterDefaults(scheme)
}
// SetDefaults_KubeProxyConfiguration fills in defaults for any unset fields.
func SetDefaults_KubeProxyConfiguration(obj *KubeProxyConfiguration) {
if len(obj.BindAddress) == 0 {
obj.BindAddress = "0.0.0.0"
}
if obj.HealthzBindAddress == "" {
obj.HealthzBindAddress = fmt.Sprintf("0.0.0.0:%v", ports.ProxyHealthzPort)
} else if !strings.Contains(obj.HealthzBindAddress, ":") {
obj.HealthzBindAddress += fmt.Sprintf(":%v", ports.ProxyHealthzPort)
}
if obj.MetricsBindAddress == "" {
obj.MetricsBindAddress = fmt.Sprintf("127.0.0.1:%v", ports.ProxyStatusPort)
} else if !strings.Contains(obj.MetricsBindAddress, ":") {
obj.MetricsBindAddress += fmt.Sprintf(":%v", ports.ProxyStatusPort)
}
if obj.OOMScoreAdj == nil {
temp := int32(qos.KubeProxyOOMScoreAdj)
obj.OOMScoreAdj = &temp
}
if obj.ResourceContainer == "" {
obj.ResourceContainer = "/kube-proxy"
}
if obj.IPTables.SyncPeriod.Duration == 0 {
obj.IPTables.SyncPeriod = metav1.Duration{Duration: 30 * time.Second}
}
if obj.IPVS.SyncPeriod.Duration == 0 {
obj.IPVS.SyncPeriod = metav1.Duration{Duration: 30 * time.Second}
}
zero := metav1.Duration{}
if obj.UDPIdleTimeout == zero {
obj.UDPIdleTimeout = metav1.Duration{Duration: 250 * time.Millisecond}
}
// If ConntrackMax is set, respect it.
if obj.Conntrack.Max == nil {
// If ConntrackMax is *not* set, use per-core scaling.
if obj.Conntrack.MaxPerCore == nil {
obj.Conntrack.MaxPerCore = pointer.Int32Ptr(32 * 1024)
}
if obj.Conntrack.Min == nil {
obj.Conntrack.Min = pointer.Int32Ptr(128 * 1024)
}
}
if obj.IPTables.MasqueradeBit == nil {
temp := int32(14)
obj.IPTables.MasqueradeBit = &temp
}
if obj.Conntrack.TCPEstablishedTimeout == nil {
obj.Conntrack.TCPEstablishedTimeout = &metav1.Duration{Duration: 24 * time.Hour} // 1 day (1/5 of the kernel's 5-day default)
}
if obj.Conntrack.TCPCloseWaitTimeout == nil {
// See https://github.com/kubernetes/kubernetes/issues/32551.
//
// CLOSE_WAIT conntrack state occurs when the Linux kernel
// sees a FIN from the remote server. Note: this is a half-close
// condition that persists as long as the local side keeps the
// socket open. The condition is rare as it is typical in most
// protocols for both sides to issue a close; this typically
// occurs when the local socket is lazily garbage collected.
//
// If the CLOSE_WAIT conntrack entry expires, then FINs from the
// local socket will not be properly SNAT'd and will not reach the
// remote server (if the connection was subject to SNAT). If the
// remote timeouts for FIN_WAIT* states exceed the CLOSE_WAIT
// timeout, then there will be an inconsistency in the state of
// the connection and a new connection reusing the SNAT (src,
// port) pair may be rejected by the remote side with RST. This
// can cause new calls to connect(2) to return with ECONNREFUSED.
//
// We set CLOSE_WAIT to one hour by default to better match
// typical server timeouts.
obj.Conntrack.TCPCloseWaitTimeout = &metav1.Duration{Duration: 1 * time.Hour}
}
if obj.ConfigSyncPeriod.Duration == 0 {
obj.ConfigSyncPeriod.Duration = 15 * time.Minute
}
if len(obj.ClientConnection.ContentType) == 0 {
obj.ClientConnection.ContentType = "application/vnd.kubernetes.protobuf"
}
if obj.ClientConnection.QPS == 0.0 {
obj.ClientConnection.QPS = 5.0
}
if obj.ClientConnection.Burst == 0 {
obj.ClientConnection.Burst = 10
}
if obj.FeatureGates == nil {
obj.FeatureGates = make(map[string]bool)
}
}
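
A short sketch of the defaulting above applied to a zero-value config; the expected values follow directly from SetDefaults_KubeProxyConfiguration:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
)

func main() {
	cfg := &v1alpha1.KubeProxyConfiguration{}
	v1alpha1.SetDefaults_KubeProxyConfiguration(cfg)

	fmt.Println(cfg.BindAddress)                            // 0.0.0.0
	fmt.Println(cfg.ClientConnection.QPS)                   // 5
	fmt.Println(*cfg.Conntrack.MaxPerCore)                  // 32768 (per-core scaling, since Max is nil)
	fmt.Println(cfg.Conntrack.TCPCloseWaitTimeout.Duration) // 1h0m0s
}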

View File

@@ -1,22 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig
// +k8s:openapi-gen=true
// +k8s:defaulter-gen=TypeMeta
package v1alpha1 // import "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"

View File

@@ -1,50 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "kubeproxy.config.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
var (
// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&KubeProxyConfiguration{},
)
return nil
}

View File

@@ -1,169 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ClientConnectionConfiguration contains details for constructing a client.
type ClientConnectionConfiguration struct {
// kubeconfig is the path to a kubeconfig file.
KubeConfigFile string `json:"kubeconfig"`
// acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the
// default value of 'application/json'. This field will control all connections to the server used by a particular
// client.
AcceptContentTypes string `json:"acceptContentTypes"`
// contentType is the content type used when sending data to the server from this client.
ContentType string `json:"contentType"`
// qps controls the number of queries per second allowed for this connection.
QPS float32 `json:"qps"`
// burst allows extra queries to accumulate when a client is exceeding its rate.
Burst int `json:"burst"`
}
// KubeProxyIPTablesConfiguration contains iptables-related configuration
// details for the Kubernetes proxy server.
type KubeProxyIPTablesConfiguration struct {
// masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using
// the pure iptables proxy mode. Values must be within the range [0, 31].
MasqueradeBit *int32 `json:"masqueradeBit"`
// masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.
MasqueradeAll bool `json:"masqueradeAll"`
// syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m',
// '2h22m'). Must be greater than 0.
SyncPeriod metav1.Duration `json:"syncPeriod"`
// minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m',
// '2h22m').
MinSyncPeriod metav1.Duration `json:"minSyncPeriod"`
}
// KubeProxyIPVSConfiguration contains ipvs-related configuration
// details for the Kubernetes proxy server.
type KubeProxyIPVSConfiguration struct {
// syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m',
// '2h22m'). Must be greater than 0.
SyncPeriod metav1.Duration `json:"syncPeriod"`
// minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m',
// '2h22m').
MinSyncPeriod metav1.Duration `json:"minSyncPeriod"`
// ipvs scheduler
Scheduler string `json:"scheduler"`
// excludeCIDRs is a list of CIDRs which the ipvs proxier should not touch
// when cleaning up ipvs services.
ExcludeCIDRs []string `json:"excludeCIDRs"`
}
// KubeProxyConntrackConfiguration contains conntrack settings for
// the Kubernetes proxy server.
type KubeProxyConntrackConfiguration struct {
// max is the maximum number of NAT connections to track (0 to
// leave as-is). This takes precedence over maxPerCore and min.
Max *int32 `json:"max"`
// maxPerCore is the maximum number of NAT connections to track
// per CPU core (0 to leave the limit as-is and ignore min).
MaxPerCore *int32 `json:"maxPerCore"`
// min is the minimum number of connection-tracking records to allocate,
// regardless of conntrackMaxPerCore (set maxPerCore=0 to leave the limit as-is).
Min *int32 `json:"min"`
// tcpEstablishedTimeout is how long an idle TCP connection will be kept open
// (e.g. '2s'). Must be greater than 0 to set.
TCPEstablishedTimeout *metav1.Duration `json:"tcpEstablishedTimeout"`
// tcpCloseWaitTimeout is how long an idle conntrack entry
// in CLOSE_WAIT state will remain in the conntrack
// table. (e.g. '60s'). Must be greater than 0 to set.
TCPCloseWaitTimeout *metav1.Duration `json:"tcpCloseWaitTimeout"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// KubeProxyConfiguration contains everything necessary to configure the
// Kubernetes proxy server.
type KubeProxyConfiguration struct {
metav1.TypeMeta `json:",inline"`
// featureGates is a map of feature names to bools that enable or disable alpha/experimental features.
FeatureGates map[string]bool `json:"featureGates,omitempty"`
// bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0
// for all interfaces)
BindAddress string `json:"bindAddress"`
// healthzBindAddress is the IP address and port for the health check server to serve on,
// defaulting to 0.0.0.0:10256
HealthzBindAddress string `json:"healthzBindAddress"`
// metricsBindAddress is the IP address and port for the metrics server to serve on,
// defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces)
MetricsBindAddress string `json:"metricsBindAddress"`
// enableProfiling enables profiling via web interface on /debug/pprof handler.
// Profiling handlers will be handled by metrics server.
EnableProfiling bool `json:"enableProfiling"`
// clusterCIDR is the CIDR range of the pods in the cluster. It is used to
// bridge traffic coming from outside of the cluster. If not provided,
// no off-cluster bridging will be performed.
ClusterCIDR string `json:"clusterCIDR"`
// hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.
HostnameOverride string `json:"hostnameOverride"`
// clientConnection specifies the kubeconfig file and client connection settings for the proxy
// server to use when communicating with the apiserver.
ClientConnection ClientConnectionConfiguration `json:"clientConnection"`
// iptables contains iptables-related configuration options.
IPTables KubeProxyIPTablesConfiguration `json:"iptables"`
// ipvs contains ipvs-related configuration options.
IPVS KubeProxyIPVSConfiguration `json:"ipvs"`
// oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within
// the range [-1000, 1000]
OOMScoreAdj *int32 `json:"oomScoreAdj"`
// mode specifies which proxy mode to use.
Mode ProxyMode `json:"mode"`
// portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
// in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
PortRange string `json:"portRange"`
// resourceContainer is the absolute name of the resource-only container to create and run
// the Kube-proxy in (Default: /kube-proxy).
ResourceContainer string `json:"resourceContainer"`
// udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
// Must be greater than 0. Only applicable for proxyMode=userspace.
UDPIdleTimeout metav1.Duration `json:"udpIdleTimeout"`
// conntrack contains conntrack-related configuration options.
Conntrack KubeProxyConntrackConfiguration `json:"conntrack"`
// configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater
// than 0.
ConfigSyncPeriod metav1.Duration `json:"configSyncPeriod"`
// nodePortAddresses is the --nodeport-addresses value for the kube-proxy process. Values must be valid
// IP blocks. These values are used as a parameter to select the interfaces where NodePort works.
// For example, to expose a service on localhost for local visits and on some other interfaces for a
// particular purpose, a list of IP blocks would do that.
// If set to "127.0.0.0/8", kube-proxy will only select the loopback interface for NodePort.
// If set to a non-zero IP block, kube-proxy will filter it down to just the IPs that apply to the node.
// An empty string slice is meant to select all network interfaces.
NodePortAddresses []string `json:"nodePortAddresses"`
}
// Currently, three proxy modes are available on Linux: 'userspace' (older, going to be EOL), 'iptables'
// (newer, faster), and 'ipvs' (newest, better in performance and scalability).
//
// Two proxy modes are available on Windows: 'userspace' (older, stable) and 'kernelspace' (newer, faster).
//
// On Linux, if the proxy mode is blank, the best-available proxy is used (currently iptables, but this may
// change in the future). If the iptables proxy is selected, regardless of how, and the system's kernel or
// iptables versions are insufficient, this always falls back to the userspace proxy. IPVS mode is enabled
// when the proxy mode is set to 'ipvs', and its fallback path is first iptables and then userspace.
// On Windows, if the proxy mode is blank, the best-available proxy is used (currently userspace, but this
// may change in the future). If the winkernel proxy is selected, regardless of how, and the Windows kernel
// cannot support this mode of proxy, this always falls back to the userspace proxy.
type ProxyMode string
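
Since the json tags above define the wire format, a brief sketch (not part of this commit) of what a serialized v1alpha1 config looks like; the field values are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
)

func main() {
	cfg := v1alpha1.KubeProxyConfiguration{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "kubeproxy.config.k8s.io/v1alpha1",
			Kind:       "KubeProxyConfiguration",
		},
		BindAddress: "0.0.0.0",
		Mode:        "iptables",
	}
	out, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	// Keys follow the json tags: apiVersion, kind, bindAddress, mode, ...
	// (fields without omitempty are emitted even when zero-valued).
	fmt.Println(string(out))
}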

View File

@@ -1,229 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
unsafe "unsafe"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(scheme *runtime.Scheme) error {
return scheme.AddGeneratedConversionFuncs(
Convert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration,
Convert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration,
Convert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration,
Convert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration,
Convert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration,
Convert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration,
Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration,
Convert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration,
Convert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration,
Convert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration,
)
}
func autoConvert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration(in *ClientConnectionConfiguration, out *kubeproxyconfig.ClientConnectionConfiguration, s conversion.Scope) error {
out.KubeConfigFile = in.KubeConfigFile
out.AcceptContentTypes = in.AcceptContentTypes
out.ContentType = in.ContentType
out.QPS = in.QPS
out.Burst = int32(in.Burst)
return nil
}
// Convert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration(in *ClientConnectionConfiguration, out *kubeproxyconfig.ClientConnectionConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration(in, out, s)
}
func autoConvert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(in *kubeproxyconfig.ClientConnectionConfiguration, out *ClientConnectionConfiguration, s conversion.Scope) error {
out.KubeConfigFile = in.KubeConfigFile
out.AcceptContentTypes = in.AcceptContentTypes
out.ContentType = in.ContentType
out.QPS = in.QPS
out.Burst = int(in.Burst)
return nil
}
// Convert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration is an autogenerated conversion function.
func Convert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(in *kubeproxyconfig.ClientConnectionConfiguration, out *ClientConnectionConfiguration, s conversion.Scope) error {
return autoConvert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(in, out, s)
}
func autoConvert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *kubeproxyconfig.KubeProxyConfiguration, s conversion.Scope) error {
out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates))
out.BindAddress = in.BindAddress
out.HealthzBindAddress = in.HealthzBindAddress
out.MetricsBindAddress = in.MetricsBindAddress
out.EnableProfiling = in.EnableProfiling
out.ClusterCIDR = in.ClusterCIDR
out.HostnameOverride = in.HostnameOverride
if err := Convert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil {
return err
}
if err := Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration(&in.IPTables, &out.IPTables, s); err != nil {
return err
}
if err := Convert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration(&in.IPVS, &out.IPVS, s); err != nil {
return err
}
out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj))
out.Mode = kubeproxyconfig.ProxyMode(in.Mode)
out.PortRange = in.PortRange
out.ResourceContainer = in.ResourceContainer
out.UDPIdleTimeout = in.UDPIdleTimeout
if err := Convert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil {
return err
}
out.ConfigSyncPeriod = in.ConfigSyncPeriod
out.NodePortAddresses = *(*[]string)(unsafe.Pointer(&in.NodePortAddresses))
return nil
}
// Convert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *kubeproxyconfig.KubeProxyConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration(in, out, s)
}
func autoConvert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *kubeproxyconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error {
out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates))
out.BindAddress = in.BindAddress
out.HealthzBindAddress = in.HealthzBindAddress
out.MetricsBindAddress = in.MetricsBindAddress
out.EnableProfiling = in.EnableProfiling
out.ClusterCIDR = in.ClusterCIDR
out.HostnameOverride = in.HostnameOverride
if err := Convert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil {
return err
}
if err := Convert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(&in.IPTables, &out.IPTables, s); err != nil {
return err
}
if err := Convert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(&in.IPVS, &out.IPVS, s); err != nil {
return err
}
out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj))
out.Mode = ProxyMode(in.Mode)
out.PortRange = in.PortRange
out.ResourceContainer = in.ResourceContainer
out.UDPIdleTimeout = in.UDPIdleTimeout
if err := Convert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil {
return err
}
out.ConfigSyncPeriod = in.ConfigSyncPeriod
out.NodePortAddresses = *(*[]string)(unsafe.Pointer(&in.NodePortAddresses))
return nil
}
// Convert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration is an autogenerated conversion function.
func Convert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *kubeproxyconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error {
return autoConvert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in, out, s)
}
func autoConvert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration(in *KubeProxyConntrackConfiguration, out *kubeproxyconfig.KubeProxyConntrackConfiguration, s conversion.Scope) error {
out.Max = (*int32)(unsafe.Pointer(in.Max))
out.MaxPerCore = (*int32)(unsafe.Pointer(in.MaxPerCore))
out.Min = (*int32)(unsafe.Pointer(in.Min))
out.TCPEstablishedTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPEstablishedTimeout))
out.TCPCloseWaitTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPCloseWaitTimeout))
return nil
}
// Convert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration(in *KubeProxyConntrackConfiguration, out *kubeproxyconfig.KubeProxyConntrackConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration(in, out, s)
}
func autoConvert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in *kubeproxyconfig.KubeProxyConntrackConfiguration, out *KubeProxyConntrackConfiguration, s conversion.Scope) error {
out.Max = (*int32)(unsafe.Pointer(in.Max))
out.MaxPerCore = (*int32)(unsafe.Pointer(in.MaxPerCore))
out.Min = (*int32)(unsafe.Pointer(in.Min))
out.TCPEstablishedTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPEstablishedTimeout))
out.TCPCloseWaitTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPCloseWaitTimeout))
return nil
}
// Convert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration is an autogenerated conversion function.
func Convert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in *kubeproxyconfig.KubeProxyConntrackConfiguration, out *KubeProxyConntrackConfiguration, s conversion.Scope) error {
return autoConvert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in, out, s)
}
func autoConvert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration(in *KubeProxyIPTablesConfiguration, out *kubeproxyconfig.KubeProxyIPTablesConfiguration, s conversion.Scope) error {
out.MasqueradeBit = (*int32)(unsafe.Pointer(in.MasqueradeBit))
out.MasqueradeAll = in.MasqueradeAll
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
return nil
}
// Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration(in *KubeProxyIPTablesConfiguration, out *kubeproxyconfig.KubeProxyIPTablesConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration(in, out, s)
}
func autoConvert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in *kubeproxyconfig.KubeProxyIPTablesConfiguration, out *KubeProxyIPTablesConfiguration, s conversion.Scope) error {
out.MasqueradeBit = (*int32)(unsafe.Pointer(in.MasqueradeBit))
out.MasqueradeAll = in.MasqueradeAll
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
return nil
}
// Convert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration is an autogenerated conversion function.
func Convert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in *kubeproxyconfig.KubeProxyIPTablesConfiguration, out *KubeProxyIPTablesConfiguration, s conversion.Scope) error {
return autoConvert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in, out, s)
}
func autoConvert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration(in *KubeProxyIPVSConfiguration, out *kubeproxyconfig.KubeProxyIPVSConfiguration, s conversion.Scope) error {
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
out.Scheduler = in.Scheduler
out.ExcludeCIDRs = *(*[]string)(unsafe.Pointer(&in.ExcludeCIDRs))
return nil
}
// Convert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration(in *KubeProxyIPVSConfiguration, out *kubeproxyconfig.KubeProxyIPVSConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration(in, out, s)
}
func autoConvert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in *kubeproxyconfig.KubeProxyIPVSConfiguration, out *KubeProxyIPVSConfiguration, s conversion.Scope) error {
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
out.Scheduler = in.Scheduler
out.ExcludeCIDRs = *(*[]string)(unsafe.Pointer(&in.ExcludeCIDRs))
return nil
}
// Convert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration is an autogenerated conversion function.
func Convert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in *kubeproxyconfig.KubeProxyIPVSConfiguration, out *KubeProxyIPVSConfiguration, s conversion.Scope) error {
return autoConvert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in, out, s)
}
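
These generated conversions are registered on the scheme via the init/localSchemeBuilder wiring, so callers normally go through Scheme.Convert instead of invoking them directly. A minimal sketch:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme"
	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
)

func main() {
	in := &v1alpha1.KubeProxyConfiguration{BindAddress: "10.0.0.2"}
	out := &kubeproxyconfig.KubeProxyConfiguration{}

	// scheme.Scheme has both the internal and v1alpha1 types registered.
	if err := scheme.Scheme.Convert(in, out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.BindAddress) // 10.0.0.2
}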

View File

@@ -1,205 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClientConnectionConfiguration) DeepCopyInto(out *ClientConnectionConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionConfiguration.
func (in *ClientConnectionConfiguration) DeepCopy() *ClientConnectionConfiguration {
if in == nil {
return nil
}
out := new(ClientConnectionConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.FeatureGates != nil {
in, out := &in.FeatureGates, &out.FeatureGates
*out = make(map[string]bool, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.ClientConnection = in.ClientConnection
in.IPTables.DeepCopyInto(&out.IPTables)
in.IPVS.DeepCopyInto(&out.IPVS)
if in.OOMScoreAdj != nil {
in, out := &in.OOMScoreAdj, &out.OOMScoreAdj
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
out.UDPIdleTimeout = in.UDPIdleTimeout
in.Conntrack.DeepCopyInto(&out.Conntrack)
out.ConfigSyncPeriod = in.ConfigSyncPeriod
if in.NodePortAddresses != nil {
in, out := &in.NodePortAddresses, &out.NodePortAddresses
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfiguration.
func (in *KubeProxyConfiguration) DeepCopy() *KubeProxyConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KubeProxyConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyConntrackConfiguration) DeepCopyInto(out *KubeProxyConntrackConfiguration) {
*out = *in
if in.Max != nil {
in, out := &in.Max, &out.Max
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
if in.MaxPerCore != nil {
in, out := &in.MaxPerCore, &out.MaxPerCore
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
if in.Min != nil {
in, out := &in.Min, &out.Min
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
if in.TCPEstablishedTimeout != nil {
in, out := &in.TCPEstablishedTimeout, &out.TCPEstablishedTimeout
if *in == nil {
*out = nil
} else {
*out = new(v1.Duration)
**out = **in
}
}
if in.TCPCloseWaitTimeout != nil {
in, out := &in.TCPCloseWaitTimeout, &out.TCPCloseWaitTimeout
if *in == nil {
*out = nil
} else {
*out = new(v1.Duration)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConntrackConfiguration.
func (in *KubeProxyConntrackConfiguration) DeepCopy() *KubeProxyConntrackConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyConntrackConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyIPTablesConfiguration) DeepCopyInto(out *KubeProxyIPTablesConfiguration) {
*out = *in
if in.MasqueradeBit != nil {
in, out := &in.MasqueradeBit, &out.MasqueradeBit
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPTablesConfiguration.
func (in *KubeProxyIPTablesConfiguration) DeepCopy() *KubeProxyIPTablesConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyIPTablesConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyIPVSConfiguration) DeepCopyInto(out *KubeProxyIPVSConfiguration) {
*out = *in
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
if in.ExcludeCIDRs != nil {
in, out := &in.ExcludeCIDRs, &out.ExcludeCIDRs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPVSConfiguration.
func (in *KubeProxyIPVSConfiguration) DeepCopy() *KubeProxyIPVSConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyIPVSConfiguration)
in.DeepCopyInto(out)
return out
}
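
A quick sanity check of the generated helpers above, as a minimal sketch (not part of this commit; the gate name is an illustrative assumption): a deep copy must detach reference fields such as the FeatureGates map from the original.

package main
import (
	"fmt"
	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
)
func main() {
	orig := &v1alpha1.KubeProxyConfiguration{
		// "SupportIPVSProxyMode" is only an illustrative gate name.
		FeatureGates: map[string]bool{"SupportIPVSProxyMode": true},
	}
	clone := orig.DeepCopy()
	// DeepCopyInto re-allocates the map, so mutating the clone
	// must not leak back into the original.
	clone.FeatureGates["SupportIPVSProxyMode"] = false
	fmt.Println(orig.FeatureGates["SupportIPVSProxyMode"]) // prints: true
}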

View File

@@ -1,37 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&KubeProxyConfiguration{}, func(obj interface{}) { SetObjectDefaults_KubeProxyConfiguration(obj.(*KubeProxyConfiguration)) })
return nil
}
func SetObjectDefaults_KubeProxyConfiguration(in *KubeProxyConfiguration) {
SetDefaults_KubeProxyConfiguration(in)
}
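
A minimal sketch of how generated defaulters like these are typically wired up (our example, not from the deleted file): register them on a runtime.Scheme, then let the scheme apply them to an object.

package main
import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
)
func main() {
	scheme := runtime.NewScheme()
	if err := v1alpha1.RegisterDefaults(scheme); err != nil {
		panic(err)
	}
	cfg := &v1alpha1.KubeProxyConfiguration{}
	// Default looks up the defaulting func registered for this type
	// and runs SetObjectDefaults_KubeProxyConfiguration on it.
	scheme.Default(cfg)
}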

View File

@@ -1,45 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["validation.go"],
importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation",
deps = [
"//pkg/apis/core/validation:go_default_library",
"//pkg/proxy/apis/kubeproxyconfig:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["validation_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/proxy/apis/kubeproxyconfig:go_default_library",
"//pkg/util/pointer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
],
)

View File

@@ -1,267 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"net"
"runtime"
"strconv"
"strings"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
)
// Validate validates the configuration of kube-proxy
func Validate(config *kubeproxyconfig.KubeProxyConfiguration) field.ErrorList {
allErrs := field.ErrorList{}
newPath := field.NewPath("KubeProxyConfiguration")
allErrs = append(allErrs, validateKubeProxyIPTablesConfiguration(config.IPTables, newPath.Child("KubeProxyIPTablesConfiguration"))...)
if config.Mode == kubeproxyconfig.ProxyModeIPVS {
allErrs = append(allErrs, validateKubeProxyIPVSConfiguration(config.IPVS, newPath.Child("KubeProxyIPVSConfiguration"))...)
}
allErrs = append(allErrs, validateKubeProxyConntrackConfiguration(config.Conntrack, newPath.Child("KubeProxyConntrackConfiguration"))...)
allErrs = append(allErrs, validateProxyMode(config.Mode, newPath.Child("Mode"))...)
allErrs = append(allErrs, validateClientConnectionConfiguration(config.ClientConnection, newPath.Child("ClientConnection"))...)
if config.OOMScoreAdj != nil && (*config.OOMScoreAdj < -1000 || *config.OOMScoreAdj > 1000) {
allErrs = append(allErrs, field.Invalid(newPath.Child("OOMScoreAdj"), *config.OOMScoreAdj, "must be within the range [-1000, 1000]"))
}
if config.UDPIdleTimeout.Duration <= 0 {
allErrs = append(allErrs, field.Invalid(newPath.Child("UDPIdleTimeout"), config.UDPIdleTimeout, "must be greater than 0"))
}
if config.ConfigSyncPeriod.Duration <= 0 {
allErrs = append(allErrs, field.Invalid(newPath.Child("ConfigSyncPeriod"), config.ConfigSyncPeriod, "must be greater than 0"))
}
if net.ParseIP(config.BindAddress) == nil {
allErrs = append(allErrs, field.Invalid(newPath.Child("BindAddress"), config.BindAddress, "not a valid textual representation of an IP address"))
}
allErrs = append(allErrs, validateHostPort(config.HealthzBindAddress, newPath.Child("HealthzBindAddress"))...)
allErrs = append(allErrs, validateHostPort(config.MetricsBindAddress, newPath.Child("MetricsBindAddress"))...)
if config.ClusterCIDR != "" {
if _, _, err := net.ParseCIDR(config.ClusterCIDR); err != nil {
allErrs = append(allErrs, field.Invalid(newPath.Child("ClusterCIDR"), config.ClusterCIDR, "must be a valid CIDR block (e.g. 10.100.0.0/16)"))
}
}
if _, err := utilnet.ParsePortRange(config.PortRange); err != nil {
allErrs = append(allErrs, field.Invalid(newPath.Child("PortRange"), config.PortRange, "must be a valid port range (e.g. 300-2000)"))
}
allErrs = append(allErrs, validateKubeProxyNodePortAddress(config.NodePortAddresses, newPath.Child("NodePortAddresses"))...)
return allErrs
}
func validateKubeProxyIPTablesConfiguration(config kubeproxyconfig.KubeProxyIPTablesConfiguration, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if config.MasqueradeBit != nil && (*config.MasqueradeBit < 0 || *config.MasqueradeBit > 31) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("MasqueradeBit"), config.MasqueradeBit, "must be within the range [0, 31]"))
}
if config.SyncPeriod.Duration <= 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("SyncPeriod"), config.SyncPeriod, "must be greater than 0"))
}
if config.MinSyncPeriod.Duration < 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("MinSyncPeriod"), config.MinSyncPeriod, "must be greater than or equal to 0"))
}
if config.MinSyncPeriod.Duration > config.SyncPeriod.Duration {
allErrs = append(allErrs, field.Invalid(fldPath.Child("SyncPeriod"), config.MinSyncPeriod, fmt.Sprintf("must be greater than or equal to %s", fldPath.Child("MinSyncPeriod").String())))
}
return allErrs
}
func validateKubeProxyIPVSConfiguration(config kubeproxyconfig.KubeProxyIPVSConfiguration, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if config.SyncPeriod.Duration <= 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("SyncPeriod"), config.SyncPeriod, "must be greater than 0"))
}
if config.MinSyncPeriod.Duration < 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("MinSyncPeriod"), config.MinSyncPeriod, "must be greater than or equal to 0"))
}
if config.MinSyncPeriod.Duration > config.SyncPeriod.Duration {
allErrs = append(allErrs, field.Invalid(fldPath.Child("SyncPeriod"), config.MinSyncPeriod, fmt.Sprintf("must be greater than or equal to %s", fldPath.Child("MinSyncPeriod").String())))
}
allErrs = append(allErrs, validateIPVSSchedulerMethod(kubeproxyconfig.IPVSSchedulerMethod(config.Scheduler), fldPath.Child("Scheduler"))...)
allErrs = append(allErrs, validateIPVSExcludeCIDRs(config.ExcludeCIDRs, fldPath.Child("ExcludeCIDRs"))...)
return allErrs
}
func validateKubeProxyConntrackConfiguration(config kubeproxyconfig.KubeProxyConntrackConfiguration, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if config.Max != nil && *config.Max < 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("Max"), config.Max, "must be greater than or equal to 0"))
}
if config.MaxPerCore != nil && *config.MaxPerCore < 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("MaxPerCore"), config.MaxPerCore, "must be greater than or equal to 0"))
}
if config.Min != nil && *config.Min < 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("Min"), config.Min, "must be greater than or equal to 0"))
}
if config.TCPEstablishedTimeout.Duration < 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("TCPEstablishedTimeout"), config.TCPEstablishedTimeout, "must be greater than or equal to 0"))
}
if config.TCPCloseWaitTimeout.Duration < 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("TCPCloseWaitTimeout"), config.TCPCloseWaitTimeout, "must be greater than or equal to 0"))
}
return allErrs
}
func validateProxyMode(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList {
if runtime.GOOS == "windows" {
return validateProxyModeWindows(mode, fldPath)
}
return validateProxyModeLinux(mode, fldPath)
}
func validateProxyModeLinux(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList {
validModes := sets.NewString(
string(kubeproxyconfig.ProxyModeUserspace),
string(kubeproxyconfig.ProxyModeIPTables),
string(kubeproxyconfig.ProxyModeIPVS),
)
if mode == "" || validModes.Has(string(mode)) {
return nil
}
errMsg := fmt.Sprintf("must be %s or blank (blank means the best-available proxy [currently iptables])", strings.Join(validModes.List(), ","))
return field.ErrorList{field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)}
}
func validateProxyModeWindows(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList {
validModes := sets.NewString(
string(kubeproxyconfig.ProxyModeUserspace),
string(kubeproxyconfig.ProxyModeKernelspace),
)
if mode == "" || validModes.Has(string(mode)) {
return nil
}
errMsg := fmt.Sprintf("must be %s or blank (blank means the most-available proxy [currently userspace])", strings.Join(validModes.List(), ","))
return field.ErrorList{field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)}
}
func validateClientConnectionConfiguration(config kubeproxyconfig.ClientConnectionConfiguration, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(config.Burst), fldPath.Child("Burst"))...)
return allErrs
}
func validateHostPort(input string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
hostIP, port, err := net.SplitHostPort(input)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath, input, "must be IP:port"))
return allErrs
}
if ip := net.ParseIP(hostIP); ip == nil {
allErrs = append(allErrs, field.Invalid(fldPath, hostIP, "must be a valid IP"))
}
if p, err := strconv.Atoi(port); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath, port, "must be a valid port"))
} else if p < 1 || p > 65535 {
allErrs = append(allErrs, field.Invalid(fldPath, port, "must be a valid port"))
}
return allErrs
}
func validateIPVSSchedulerMethod(scheduler kubeproxyconfig.IPVSSchedulerMethod, fldPath *field.Path) field.ErrorList {
supportedMethod := []kubeproxyconfig.IPVSSchedulerMethod{
kubeproxyconfig.RoundRobin,
kubeproxyconfig.WeightedRoundRobin,
kubeproxyconfig.LeastConnection,
kubeproxyconfig.WeightedLeastConnection,
kubeproxyconfig.LocalityBasedLeastConnection,
kubeproxyconfig.LocalityBasedLeastConnectionWithReplication,
kubeproxyconfig.SourceHashing,
kubeproxyconfig.DestinationHashing,
kubeproxyconfig.ShortestExpectedDelay,
kubeproxyconfig.NeverQueue,
"",
}
allErrs := field.ErrorList{}
var found bool
for i := range supportedMethod {
if scheduler == supportedMethod[i] {
found = true
break
}
}
// Not found
if !found {
errMsg := fmt.Sprintf("must be in %v, blank means the default algorithm method (currently rr)", supportedMethod)
allErrs = append(allErrs, field.Invalid(fldPath.Child("Scheduler"), string(scheduler), errMsg))
}
return allErrs
}
func validateKubeProxyNodePortAddress(nodePortAddresses []string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for i := range nodePortAddresses {
if _, _, err := net.ParseCIDR(nodePortAddresses[i]); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath, nodePortAddresses, "must be a valid IP block"))
break
}
}
return allErrs
}
func validateIPVSExcludeCIDRs(excludeCIDRs []string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for i := range excludeCIDRs {
if _, _, err := net.ParseCIDR(excludeCIDRs[i]); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath, excludeCIDRs, "must be a valid IP block"))
}
}
return allErrs
}
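
To see the validator above end to end, here is a hedged sketch (ours, not part of the deleted file) that feeds Validate the same minimal valid configuration the tests below use; a zero-valued config would instead come back with several errors.

package main
import (
	"fmt"
	"time"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation"
	"k8s.io/kubernetes/pkg/util/pointer"
)
func main() {
	cfg := &kubeproxyconfig.KubeProxyConfiguration{
		BindAddress:        "192.168.59.103",
		HealthzBindAddress: "0.0.0.0:10256",
		MetricsBindAddress: "127.0.0.1:10249",
		ClusterCIDR:        "192.168.59.0/24",
		UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
		ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
		IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
			MasqueradeAll: true,
			SyncPeriod:    metav1.Duration{Duration: 5 * time.Second},
			MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
		},
		Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
			Max:                   pointer.Int32Ptr(2),
			MaxPerCore:            pointer.Int32Ptr(1),
			Min:                   pointer.Int32Ptr(1),
			TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
			TCPCloseWaitTimeout:   &metav1.Duration{Duration: 5 * time.Second},
		},
	}
	// An empty error list means the configuration passed all checks.
	if errs := validation.Validate(cfg); len(errs) > 0 {
		fmt.Println(errs)
	}
}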

View File

@@ -1,823 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"runtime"
"strings"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
"k8s.io/kubernetes/pkg/util/pointer"
)
func TestValidateKubeProxyConfiguration(t *testing.T) {
var proxyMode kubeproxyconfig.ProxyMode
if runtime.GOOS == "windows" {
proxyMode = kubeproxyconfig.ProxyModeKernelspace
} else {
proxyMode = kubeproxyconfig.ProxyModeIPVS
}
successCases := []kubeproxyconfig.KubeProxyConfiguration{
{
BindAddress: "192.168.59.103",
HealthzBindAddress: "0.0.0.0:10256",
MetricsBindAddress: "127.0.0.1:10249",
ClusterCIDR: "192.168.59.0/24",
UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second},
ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second},
IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
MasqueradeAll: true,
SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
Mode: proxyMode,
IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{
SyncPeriod: metav1.Duration{Duration: 10 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
},
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
},
{
BindAddress: "192.168.59.103",
HealthzBindAddress: "0.0.0.0:10256",
MetricsBindAddress: "127.0.0.1:10249",
ClusterCIDR: "192.168.59.0/24",
UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second},
ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second},
IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
MasqueradeAll: true,
SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
},
}
for _, successCase := range successCases {
if errs := Validate(&successCase); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
errorCases := []struct {
config kubeproxyconfig.KubeProxyConfiguration
msg string
}{
{
config: kubeproxyconfig.KubeProxyConfiguration{
// only BindAddress is invalid
BindAddress: "10.10.12.11:2000",
HealthzBindAddress: "0.0.0.0:10256",
MetricsBindAddress: "127.0.0.1:10249",
ClusterCIDR: "192.168.59.0/24",
UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second},
ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second},
IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
MasqueradeAll: true,
SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
},
msg: "not a valid textual representation of an IP address",
},
{
config: kubeproxyconfig.KubeProxyConfiguration{
BindAddress: "10.10.12.11",
// only HealthzBindAddress is invalid
HealthzBindAddress: "0.0.0.0",
MetricsBindAddress: "127.0.0.1:10249",
ClusterCIDR: "192.168.59.0/24",
UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second},
ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second},
IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
MasqueradeAll: true,
SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
},
msg: "must be IP:port",
},
{
config: kubeproxyconfig.KubeProxyConfiguration{
BindAddress: "10.10.12.11",
HealthzBindAddress: "0.0.0.0:12345",
// only MetricsBindAddress is invalid
MetricsBindAddress: "127.0.0.1",
ClusterCIDR: "192.168.59.0/24",
UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second},
ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second},
IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
MasqueradeAll: true,
SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
},
msg: "must be IP:port",
},
{
config: kubeproxyconfig.KubeProxyConfiguration{
BindAddress: "10.10.12.11",
HealthzBindAddress: "0.0.0.0:12345",
MetricsBindAddress: "127.0.0.1:10249",
// only ClusterCIDR is invalid
ClusterCIDR: "192.168.59.0",
UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second},
ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second},
IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
MasqueradeAll: true,
SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
},
msg: "must be a valid CIDR block (e.g. 10.100.0.0/16)",
},
{
config: kubeproxyconfig.KubeProxyConfiguration{
BindAddress: "10.10.12.11",
HealthzBindAddress: "0.0.0.0:12345",
MetricsBindAddress: "127.0.0.1:10249",
ClusterCIDR: "192.168.59.0/24",
// only UDPIdleTimeout is invalid
UDPIdleTimeout: metav1.Duration{Duration: -1 * time.Second},
ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second},
IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
MasqueradeAll: true,
SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
},
msg: "must be greater than 0",
},
{
config: kubeproxyconfig.KubeProxyConfiguration{
BindAddress: "10.10.12.11",
HealthzBindAddress: "0.0.0.0:12345",
MetricsBindAddress: "127.0.0.1:10249",
ClusterCIDR: "192.168.59.0/24",
UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second},
// only ConfigSyncPeriod is invalid
ConfigSyncPeriod: metav1.Duration{Duration: -1 * time.Second},
IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
MasqueradeAll: true,
SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
},
msg: "must be greater than 0",
},
{
config: kubeproxyconfig.KubeProxyConfiguration{
BindAddress: "192.168.59.103",
HealthzBindAddress: "0.0.0.0:10256",
MetricsBindAddress: "127.0.0.1:10249",
ClusterCIDR: "192.168.59.0/24",
UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second},
ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second},
IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
MasqueradeAll: true,
SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
// no valid IPVS sync period specified while in IPVS mode.
Mode: kubeproxyconfig.ProxyModeIPVS,
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
},
msg: "must be greater than 0",
},
}
for _, errorCase := range errorCases {
if errs := Validate(&errorCase.config); len(errs) == 0 {
t.Errorf("expected failure for %s", errorCase.msg)
} else if !strings.Contains(errs[0].Error(), errorCase.msg) {
t.Errorf("unexpected error: %v, expected: %s", errs[0], errorCase.msg)
}
}
}
func TestValidateKubeProxyIPTablesConfiguration(t *testing.T) {
valid := int32(5)
successCases := []kubeproxyconfig.KubeProxyIPTablesConfiguration{
{
MasqueradeAll: true,
SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
{
MasqueradeBit: &valid,
MasqueradeAll: true,
SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
}
newPath := field.NewPath("KubeProxyConfiguration")
for _, successCase := range successCases {
if errs := validateKubeProxyIPTablesConfiguration(successCase, newPath.Child("KubeProxyIPTablesConfiguration")); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
invalid := int32(-10)
errorCases := []struct {
config kubeproxyconfig.KubeProxyIPTablesConfiguration
msg string
}{
{
config: kubeproxyconfig.KubeProxyIPTablesConfiguration{
MasqueradeAll: true,
SyncPeriod: metav1.Duration{Duration: -5 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
msg: "must be greater than 0",
},
{
config: kubeproxyconfig.KubeProxyIPTablesConfiguration{
MasqueradeBit: &valid,
MasqueradeAll: true,
SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: -1 * time.Second},
},
msg: "must be greater than or equal to 0",
},
{
config: kubeproxyconfig.KubeProxyIPTablesConfiguration{
MasqueradeBit: &invalid,
MasqueradeAll: true,
SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
msg: "must be within the range [0, 31]",
},
// SyncPeriod must be >= MinSyncPeriod
{
config: kubeproxyconfig.KubeProxyIPTablesConfiguration{
MasqueradeBit: &valid,
MasqueradeAll: true,
SyncPeriod: metav1.Duration{Duration: 1 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
},
msg: fmt.Sprintf("must be greater than or equal to %s", newPath.Child("KubeProxyIPTablesConfiguration").Child("MinSyncPeriod").String()),
},
}
for _, errorCase := range errorCases {
if errs := validateKubeProxyIPTablesConfiguration(errorCase.config, newPath.Child("KubeProxyIPTablesConfiguration")); len(errs) == 0 {
t.Errorf("expected failure for %s", errorCase.msg)
} else if !strings.Contains(errs[0].Error(), errorCase.msg) {
t.Errorf("unexpected error: %v, expected: %s", errs[0], errorCase.msg)
}
}
}
func TestValidateKubeProxyIPVSConfiguration(t *testing.T) {
newPath := field.NewPath("KubeProxyConfiguration")
testCases := []struct {
config kubeproxyconfig.KubeProxyIPVSConfiguration
expectErr bool
reason string
}{
{
config: kubeproxyconfig.KubeProxyIPVSConfiguration{
SyncPeriod: metav1.Duration{Duration: -5 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
expectErr: true,
reason: "SyncPeriod must be greater than 0",
},
{
config: kubeproxyconfig.KubeProxyIPVSConfiguration{
SyncPeriod: metav1.Duration{Duration: 0 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 10 * time.Second},
},
expectErr: true,
reason: "SyncPeriod must be greater than 0",
},
{
config: kubeproxyconfig.KubeProxyIPVSConfiguration{
SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: -1 * time.Second},
},
expectErr: true,
reason: "MinSyncPeriod must be greater than or equal to 0",
},
{
config: kubeproxyconfig.KubeProxyIPVSConfiguration{
SyncPeriod: metav1.Duration{Duration: 1 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
},
expectErr: true,
reason: "SyncPeriod must be greater than or equal to MinSyncPeriod",
},
// SyncPeriod == MinSyncPeriod
{
config: kubeproxyconfig.KubeProxyIPVSConfiguration{
SyncPeriod: metav1.Duration{Duration: 10 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 10 * time.Second},
},
expectErr: false,
},
// SyncPeriod > MinSyncPeriod
{
config: kubeproxyconfig.KubeProxyIPVSConfiguration{
SyncPeriod: metav1.Duration{Duration: 10 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
},
expectErr: false,
},
// MinSyncPeriod can be 0
{
config: kubeproxyconfig.KubeProxyIPVSConfiguration{
SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 0 * time.Second},
},
expectErr: false,
},
}
for _, test := range testCases {
errs := validateKubeProxyIPVSConfiguration(test.config, newPath.Child("KubeProxyIPVSConfiguration"))
if len(errs) == 0 && test.expectErr {
t.Errorf("Expect error, got nil, reason: %s", test.reason)
}
if len(errs) > 0 && !test.expectErr {
t.Errorf("Unexpected error: %v", errs)
}
}
}
func TestValidateKubeProxyConntrackConfiguration(t *testing.T) {
successCases := []kubeproxyconfig.KubeProxyConntrackConfiguration{
{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
{
Max: pointer.Int32Ptr(0),
MaxPerCore: pointer.Int32Ptr(0),
Min: pointer.Int32Ptr(0),
TCPEstablishedTimeout: &metav1.Duration{Duration: 0 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 0 * time.Second},
},
}
newPath := field.NewPath("KubeProxyConfiguration")
for _, successCase := range successCases {
if errs := validateKubeProxyConntrackConfiguration(successCase, newPath.Child("KubeProxyConntrackConfiguration")); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
errorCases := []struct {
config kubeproxyconfig.KubeProxyConntrackConfiguration
msg string
}{
{
config: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(-1),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
msg: "must be greater than or equal to 0",
},
{
config: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(-1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
msg: "must be greater than or equal to 0",
},
{
config: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(-1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
msg: "must be greater than or equal to 0",
},
{
config: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(4),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(3),
TCPEstablishedTimeout: &metav1.Duration{Duration: -5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
msg: "must be greater than or equal to 0",
},
{
config: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(4),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(3),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: -5 * time.Second},
},
msg: "must be greater than or equal to 0",
},
}
for _, errorCase := range errorCases {
if errs := validateKubeProxyConntrackConfiguration(errorCase.config, newPath.Child("KubeProxyConntrackConfiguration")); len(errs) == 0 {
t.Errorf("expected failure for %s", errorCase.msg)
} else if !strings.Contains(errs[0].Error(), errorCase.msg) {
t.Errorf("unexpected error: %v, expected: %s", errs[0], errorCase.msg)
}
}
}
func TestValidateProxyMode(t *testing.T) {
newPath := field.NewPath("KubeProxyConfiguration")
successCases := []kubeproxyconfig.ProxyMode{
kubeproxyconfig.ProxyModeUserspace,
kubeproxyconfig.ProxyMode(""),
}
if runtime.GOOS == "windows" {
successCases = append(successCases, kubeproxyconfig.ProxyModeKernelspace)
} else {
successCases = append(successCases, kubeproxyconfig.ProxyModeIPTables, kubeproxyconfig.ProxyModeIPVS)
}
for _, successCase := range successCases {
if errs := validateProxyMode(successCase, newPath.Child("ProxyMode")); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
errorCases := []struct {
mode kubeproxyconfig.ProxyMode
msg string
}{
{
mode: kubeproxyconfig.ProxyMode("non-existing"),
msg: "or blank (blank means the",
},
}
for _, errorCase := range errorCases {
if errs := validateProxyMode(errorCase.mode, newPath.Child("ProxyMode")); len(errs) == 0 {
t.Errorf("expected failure %s for %v", errorCase.msg, errorCase.mode)
} else if !strings.Contains(errs[0].Error(), errorCase.msg) {
t.Errorf("unexpected error: %v, expected: %s", errs[0], errorCase.msg)
}
}
}
func TestValidateClientConnectionConfiguration(t *testing.T) {
newPath := field.NewPath("KubeProxyConfiguration")
successCases := []kubeproxyconfig.ClientConnectionConfiguration{
{
Burst: 0,
},
{
Burst: 5,
},
}
for _, successCase := range successCases {
if errs := validateClientConnectionConfiguration(successCase, newPath.Child("Burst")); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
errorCases := []struct {
ccc kubeproxyconfig.ClientConnectionConfiguration
msg string
}{
{
ccc: kubeproxyconfig.ClientConnectionConfiguration{Burst: -5},
msg: "must be greater than or equal to 0",
},
}
for _, errorCase := range errorCases {
if errs := validateClientConnectionConfiguration(errorCase.ccc, newPath.Child("Burst")); len(errs) == 0 {
t.Errorf("expected failure for %s", errorCase.msg)
} else if !strings.Contains(errs[0].Error(), errorCase.msg) {
t.Errorf("unexpected error: %v, expected: %s", errs[0], errorCase.msg)
}
}
}
func TestValidateHostPort(t *testing.T) {
newPath := field.NewPath("KubeProxyConfiguration")
successCases := []string{
"0.0.0.0:10256",
"127.0.0.1:10256",
"10.10.10.10:10256",
}
for _, successCase := range successCases {
if errs := validateHostPort(successCase, newPath.Child("HealthzBindAddress")); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
errorCases := []struct {
ccc string
msg string
}{
{
ccc: "10.10.10.10",
msg: "must be IP:port",
},
{
ccc: "123.456.789.10:12345",
msg: "must be a valid IP",
},
{
ccc: "10.10.10.10:foo",
msg: "must be a valid port",
},
{
ccc: "10.10.10.10:0",
msg: "must be a valid port",
},
{
ccc: "10.10.10.10:65536",
msg: "must be a valid port",
},
}
for _, errorCase := range errorCases {
if errs := validateHostPort(errorCase.ccc, newPath.Child("HealthzBindAddress")); len(errs) == 0 {
t.Errorf("expected failure for %s", errorCase.msg)
} else if !strings.Contains(errs[0].Error(), errorCase.msg) {
t.Errorf("unexpected error: %v, expected: %s", errs[0], errorCase.msg)
}
}
}
func TestValidateIPVSSchedulerMethod(t *testing.T) {
newPath := field.NewPath("KubeProxyConfiguration")
successCases := []kubeproxyconfig.IPVSSchedulerMethod{
kubeproxyconfig.RoundRobin,
kubeproxyconfig.WeightedRoundRobin,
kubeproxyconfig.LeastConnection,
kubeproxyconfig.WeightedLeastConnection,
kubeproxyconfig.LocalityBasedLeastConnection,
kubeproxyconfig.LocalityBasedLeastConnectionWithReplication,
kubeproxyconfig.SourceHashing,
kubeproxyconfig.DestinationHashing,
kubeproxyconfig.ShortestExpectedDelay,
kubeproxyconfig.NeverQueue,
"",
}
for _, successCase := range successCases {
if errs := validateIPVSSchedulerMethod(successCase, newPath.Child("Scheduler")); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
errorCases := []struct {
mode kubeproxyconfig.IPVSSchedulerMethod
msg string
}{
{
mode: kubeproxyconfig.IPVSSchedulerMethod("non-existing"),
msg: "blank means the default algorithm method (currently rr)",
},
}
for _, errorCase := range errorCases {
if errs := validateIPVSSchedulerMethod(errorCase.mode, newPath.Child("ProxyMode")); len(errs) == 0 {
t.Errorf("expected failure for %s", errorCase.msg)
} else if !strings.Contains(errs[0].Error(), errorCase.msg) {
t.Errorf("unexpected error: %v, expected: %s", errs[0], errorCase.msg)
}
}
}
func TestValidateKubeProxyNodePortAddress(t *testing.T) {
newPath := field.NewPath("KubeProxyConfiguration")
successCases := []struct {
addresses []string
}{
{[]string{}},
{[]string{"127.0.0.0/8"}},
{[]string{"0.0.0.0/0"}},
{[]string{"::/0"}},
{[]string{"127.0.0.1/32", "1.2.3.0/24"}},
{[]string{"127.0.0.0/8"}},
{[]string{"127.0.0.1/32"}},
{[]string{"::1/128"}},
{[]string{"1.2.3.4/32"}},
{[]string{"10.20.30.0/24"}},
{[]string{"10.20.0.0/16", "100.200.0.0/16"}},
{[]string{"10.0.0.0/8"}},
{[]string{"2001:db8::/32"}},
}
for _, successCase := range successCases {
if errs := validateKubeProxyNodePortAddress(successCase.addresses, newPath.Child("NodePortAddresses")); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
errorCases := []struct {
addresses []string
msg string
}{
{
addresses: []string{"foo"},
msg: "must be a valid IP block",
},
{
addresses: []string{"1.2.3"},
msg: "must be a valid IP block",
},
{
addresses: []string{""},
msg: "must be a valid IP block",
},
{
addresses: []string{"10.20.30.40"},
msg: "must be a valid IP block",
},
{
addresses: []string{"::1"},
msg: "must be a valid IP block",
},
{
addresses: []string{"2001:db8:1"},
msg: "must be a valid IP block",
},
{
addresses: []string{"2001:db8:xyz/64"},
msg: "must be a valid IP block",
},
}
for _, errorCase := range errorCases {
if errs := validateKubeProxyNodePortAddress(errorCase.addresses, newPath.Child("NodePortAddresses")); len(errs) == 0 {
t.Errorf("expected failure for %s", errorCase.msg)
} else if !strings.Contains(errs[0].Error(), errorCase.msg) {
t.Errorf("unexpected error: %v, expected: %s", errs[0], errorCase.msg)
}
}
}
func TestValidateKubeProxyExcludeCIDRs(t *testing.T) {
// TODO(rramkumar): This test is a copy of TestValidateKubeProxyNodePortAddress.
// Maybe some code can be shared?
newPath := field.NewPath("KubeProxyConfiguration")
successCases := []struct {
addresses []string
}{
{[]string{}},
{[]string{"127.0.0.0/8"}},
{[]string{"0.0.0.0/0"}},
{[]string{"::/0"}},
{[]string{"127.0.0.1/32", "1.2.3.0/24"}},
{[]string{"127.0.0.0/8"}},
{[]string{"127.0.0.1/32"}},
{[]string{"::1/128"}},
{[]string{"1.2.3.4/32"}},
{[]string{"10.20.30.0/24"}},
{[]string{"10.20.0.0/16", "100.200.0.0/16"}},
{[]string{"10.0.0.0/8"}},
{[]string{"2001:db8::/32"}},
}
for _, successCase := range successCases {
if errs := validateIPVSExcludeCIDRs(successCase.addresses, newPath.Child("ExcludeCIDRs")); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
errorCases := []struct {
addresses []string
msg string
}{
{
addresses: []string{"foo"},
msg: "must be a valid IP block",
},
{
addresses: []string{"1.2.3"},
msg: "must be a valid IP block",
},
{
addresses: []string{""},
msg: "must be a valid IP block",
},
{
addresses: []string{"10.20.30.40"},
msg: "must be a valid IP block",
},
{
addresses: []string{"::1"},
msg: "must be a valid IP block",
},
{
addresses: []string{"2001:db8:1"},
msg: "must be a valid IP block",
},
{
addresses: []string{"2001:db8:xyz/64"},
msg: "must be a valid IP block",
},
}
for _, errorCase := range errorCases {
if errs := validateIPVSExcludeCIDRs(errorCase.addresses, newPath.Child("ExcludeCIDRs")); len(errs) == 0 {
t.Errorf("expected failure for %s", errorCase.msg)
} else if !strings.Contains(errs[0].Error(), errorCase.msg) {
t.Errorf("unexpected error: %v, expected: %s", errs[0], errorCase.msg)
}
}
}

View File

@@ -1,227 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package kubeproxyconfig
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClientConnectionConfiguration) DeepCopyInto(out *ClientConnectionConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionConfiguration.
func (in *ClientConnectionConfiguration) DeepCopy() *ClientConnectionConfiguration {
if in == nil {
return nil
}
out := new(ClientConnectionConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ConfigurationMap) DeepCopyInto(out *ConfigurationMap) {
{
in := &in
*out = make(ConfigurationMap, len(*in))
for key, val := range *in {
(*out)[key] = val
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationMap.
func (in ConfigurationMap) DeepCopy() ConfigurationMap {
if in == nil {
return nil
}
out := new(ConfigurationMap)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.FeatureGates != nil {
in, out := &in.FeatureGates, &out.FeatureGates
*out = make(map[string]bool, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.ClientConnection = in.ClientConnection
in.IPTables.DeepCopyInto(&out.IPTables)
in.IPVS.DeepCopyInto(&out.IPVS)
if in.OOMScoreAdj != nil {
in, out := &in.OOMScoreAdj, &out.OOMScoreAdj
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
out.UDPIdleTimeout = in.UDPIdleTimeout
in.Conntrack.DeepCopyInto(&out.Conntrack)
out.ConfigSyncPeriod = in.ConfigSyncPeriod
if in.NodePortAddresses != nil {
in, out := &in.NodePortAddresses, &out.NodePortAddresses
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfiguration.
func (in *KubeProxyConfiguration) DeepCopy() *KubeProxyConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KubeProxyConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyConntrackConfiguration) DeepCopyInto(out *KubeProxyConntrackConfiguration) {
*out = *in
if in.Max != nil {
in, out := &in.Max, &out.Max
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
if in.MaxPerCore != nil {
in, out := &in.MaxPerCore, &out.MaxPerCore
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
if in.Min != nil {
in, out := &in.Min, &out.Min
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
if in.TCPEstablishedTimeout != nil {
in, out := &in.TCPEstablishedTimeout, &out.TCPEstablishedTimeout
if *in == nil {
*out = nil
} else {
*out = new(v1.Duration)
**out = **in
}
}
if in.TCPCloseWaitTimeout != nil {
in, out := &in.TCPCloseWaitTimeout, &out.TCPCloseWaitTimeout
if *in == nil {
*out = nil
} else {
*out = new(v1.Duration)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConntrackConfiguration.
func (in *KubeProxyConntrackConfiguration) DeepCopy() *KubeProxyConntrackConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyConntrackConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyIPTablesConfiguration) DeepCopyInto(out *KubeProxyIPTablesConfiguration) {
*out = *in
if in.MasqueradeBit != nil {
in, out := &in.MasqueradeBit, &out.MasqueradeBit
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPTablesConfiguration.
func (in *KubeProxyIPTablesConfiguration) DeepCopy() *KubeProxyIPTablesConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyIPTablesConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyIPVSConfiguration) DeepCopyInto(out *KubeProxyIPVSConfiguration) {
*out = *in
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
if in.ExcludeCIDRs != nil {
in, out := &in.ExcludeCIDRs, &out.ExcludeCIDRs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPVSConfiguration.
func (in *KubeProxyIPVSConfiguration) DeepCopy() *KubeProxyIPVSConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyIPVSConfiguration)
in.DeepCopyInto(out)
return out
}
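
One wrinkle worth noting in the file above: ConfigurationMap uses a value receiver, so DeepCopy hands back a fresh map value rather than a pointer. A small sketch (ours; the key and the map[string]string element type are assumptions read off the generated copy loop):

package main
import (
	"fmt"
	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
)
func main() {
	m := kubeproxyconfig.ConfigurationMap{"iptables-min-sync-period": "2s"}
	c := m.DeepCopy() // returns a ConfigurationMap value, not a pointer
	c["iptables-min-sync-period"] = "10s"
	fmt.Println(m["iptables-min-sync-period"]) // prints: 2s
}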

View File

@@ -1,57 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"config.go",
"doc.go",
],
importpath = "k8s.io/kubernetes/pkg/proxy/config",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/client/informers/informers_generated/internalversion/core/internalversion:go_default_library",
"//pkg/client/listers/core/internalversion:go_default_library",
"//pkg/controller:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"api_test.go",
"config_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/client/clientset_generated/internalclientset/fake:go_default_library",
"//pkg/client/informers/informers_generated/internalversion:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -1,6 +0,0 @@
reviewers:
- thockin
- lavalamp
- smarterclayton
- brendandburns
- freehan

View File

@@ -1,214 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"reflect"
"sync"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
ktesting "k8s.io/client-go/testing"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
)
func TestNewServicesSourceApi_UpdatesAndMultipleServices(t *testing.T) {
service1v1 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "s1"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}}}
service1v2 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "s1"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 20}}}}
service2 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "s2"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 30}}}}
// Setup fake api client.
client := fake.NewSimpleClientset()
fakeWatch := watch.NewFake()
client.PrependWatchReactor("services", ktesting.DefaultWatchReactor(fakeWatch, nil))
stopCh := make(chan struct{})
defer close(stopCh)
handler := NewServiceHandlerMock()
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
serviceConfig := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), time.Minute)
serviceConfig.RegisterEventHandler(handler)
go sharedInformers.Start(stopCh)
go serviceConfig.Run(stopCh)
// Add the first service
fakeWatch.Add(service1v1)
handler.ValidateServices(t, []*api.Service{service1v1})
// Add another service
fakeWatch.Add(service2)
handler.ValidateServices(t, []*api.Service{service1v1, service2})
// Modify service1
fakeWatch.Modify(service1v2)
handler.ValidateServices(t, []*api.Service{service1v2, service2})
// Delete service1
fakeWatch.Delete(service1v2)
handler.ValidateServices(t, []*api.Service{service2})
// Delete service2
fakeWatch.Delete(service2)
handler.ValidateServices(t, []*api.Service{})
}
func TestNewEndpointsSourceApi_UpdatesAndMultipleEndpoints(t *testing.T) {
endpoints1v1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "e1"},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "1.2.3.4"},
},
Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
}},
}
endpoints1v2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "e1"},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "1.2.3.4"},
{IP: "4.3.2.1"},
},
Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
}},
}
endpoints2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "e2"},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
{IP: "5.6.7.8"},
},
Ports: []api.EndpointPort{{Port: 80, Protocol: "TCP"}},
}},
}
// Setup fake api client.
client := fake.NewSimpleClientset()
fakeWatch := watch.NewFake()
client.PrependWatchReactor("endpoints", ktesting.DefaultWatchReactor(fakeWatch, nil))
stopCh := make(chan struct{})
defer close(stopCh)
handler := NewEndpointsHandlerMock()
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
endpointsConfig := NewEndpointsConfig(sharedInformers.Core().InternalVersion().Endpoints(), time.Minute)
endpointsConfig.RegisterEventHandler(handler)
go sharedInformers.Start(stopCh)
go endpointsConfig.Run(stopCh)
// Add the first endpoints
fakeWatch.Add(endpoints1v1)
handler.ValidateEndpoints(t, []*api.Endpoints{endpoints1v1})
// Add another endpoints
fakeWatch.Add(endpoints2)
handler.ValidateEndpoints(t, []*api.Endpoints{endpoints1v1, endpoints2})
// Modify endpoints1
fakeWatch.Modify(endpoints1v2)
handler.ValidateEndpoints(t, []*api.Endpoints{endpoints1v2, endpoints2})
// Delete endpoints1
fakeWatch.Delete(endpoints1v2)
handler.ValidateEndpoints(t, []*api.Endpoints{endpoints2})
// Delete endpoints2
fakeWatch.Delete(endpoints2)
handler.ValidateEndpoints(t, []*api.Endpoints{})
}
func newSvcHandler(t *testing.T, svcs []*api.Service, done func()) ServiceHandler {
shm := &ServiceHandlerMock{
state: make(map[types.NamespacedName]*api.Service),
}
shm.process = func(services []*api.Service) {
defer done()
if !reflect.DeepEqual(services, svcs) {
t.Errorf("Unexpected services: %#v, expected: %#v", services, svcs)
}
}
return shm
}
func newEpsHandler(t *testing.T, eps []*api.Endpoints, done func()) EndpointsHandler {
ehm := &EndpointsHandlerMock{
state: make(map[types.NamespacedName]*api.Endpoints),
}
ehm.process = func(endpoints []*api.Endpoints) {
defer done()
if !reflect.DeepEqual(eps, endpoints) {
t.Errorf("Unexpected endpoints: %#v, expected: %#v", endpoints, eps)
}
}
return ehm
}
func TestInitialSync(t *testing.T) {
svc1 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
}
svc2 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
}
eps1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
}
eps2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
}
var wg sync.WaitGroup
// Wait for both services and endpoints handler.
wg.Add(2)
// Setup fake api client.
client := fake.NewSimpleClientset(svc1, svc2, eps2, eps1)
sharedInformers := informers.NewSharedInformerFactory(client, 0)
svcConfig := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), 0)
epsConfig := NewEndpointsConfig(sharedInformers.Core().InternalVersion().Endpoints(), 0)
svcHandler := newSvcHandler(t, []*api.Service{svc2, svc1}, wg.Done)
svcConfig.RegisterEventHandler(svcHandler)
epsHandler := newEpsHandler(t, []*api.Endpoints{eps2, eps1}, wg.Done)
epsConfig.RegisterEventHandler(epsHandler)
stopCh := make(chan struct{})
defer close(stopCh)
go sharedInformers.Start(stopCh)
go svcConfig.Run(stopCh)
go epsConfig.Run(stopCh)
wg.Wait()
}

View File

@@ -1,263 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"fmt"
"time"
"github.com/golang/glog"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/cache"
api "k8s.io/kubernetes/pkg/apis/core"
coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion"
listers "k8s.io/kubernetes/pkg/client/listers/core/internalversion"
"k8s.io/kubernetes/pkg/controller"
)
// ServiceHandler is an abstract interface of objects which receive
// notifications about service object changes.
type ServiceHandler interface {
// OnServiceAdd is called whenever creation of a new service object
// is observed.
OnServiceAdd(service *api.Service)
// OnServiceUpdate is called whenever modification of an existing
// service object is observed.
OnServiceUpdate(oldService, service *api.Service)
// OnServiceDelete is called whenever deletion of an existing service
// object is observed.
OnServiceDelete(service *api.Service)
// OnServiceSynced is called once all the initial event handlers were
// called and the state is fully propagated to the local cache.
OnServiceSynced()
}
// EndpointsHandler is an abstract interface of objects which receive
// notifications about endpoints object changes.
type EndpointsHandler interface {
// OnEndpointsAdd is called whenever creation of a new endpoints object
// is observed.
OnEndpointsAdd(endpoints *api.Endpoints)
// OnEndpointsUpdate is called whenever modification of an existing
// endpoints object is observed.
OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints)
// OnEndpointsDelete is called whenever deletion of an existing endpoints
// object is observed.
OnEndpointsDelete(endpoints *api.Endpoints)
// OnEndpointsSynced is called once all the initial event handlers were
// called and the state is fully propagated to the local cache.
OnEndpointsSynced()
}
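// Illustrative only, not part of the original file: a minimal
// ServiceHandler that satisfies the interface above by logging each
// callback. Real consumers (the proxier backends) rebuild their
// service maps in these hooks instead.
type loggingServiceHandler struct{}
func (h *loggingServiceHandler) OnServiceAdd(service *api.Service) {
	glog.Infof("service added: %s/%s", service.Namespace, service.Name)
}
func (h *loggingServiceHandler) OnServiceUpdate(oldService, service *api.Service) {
	glog.Infof("service updated: %s/%s", service.Namespace, service.Name)
}
func (h *loggingServiceHandler) OnServiceDelete(service *api.Service) {
	glog.Infof("service deleted: %s/%s", service.Namespace, service.Name)
}
func (h *loggingServiceHandler) OnServiceSynced() {
	glog.Info("service caches synced")
}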
// EndpointsConfig tracks a set of endpoints configurations.
// It accepts "set", "add" and "remove" operations of endpoints via channels, and invokes registered handlers on change.
type EndpointsConfig struct {
lister listers.EndpointsLister
listerSynced cache.InformerSynced
eventHandlers []EndpointsHandler
}
// NewEndpointsConfig creates a new EndpointsConfig.
func NewEndpointsConfig(endpointsInformer coreinformers.EndpointsInformer, resyncPeriod time.Duration) *EndpointsConfig {
result := &EndpointsConfig{
lister: endpointsInformer.Lister(),
listerSynced: endpointsInformer.Informer().HasSynced,
}
endpointsInformer.Informer().AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: result.handleAddEndpoints,
UpdateFunc: result.handleUpdateEndpoints,
DeleteFunc: result.handleDeleteEndpoints,
},
resyncPeriod,
)
return result
}
// RegisterEventHandler registers a handler which is called on every endpoints change.
func (c *EndpointsConfig) RegisterEventHandler(handler EndpointsHandler) {
c.eventHandlers = append(c.eventHandlers, handler)
}
// Run starts the goroutine responsible for calling registered handlers.
func (c *EndpointsConfig) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.Info("Starting endpoints config controller")
defer glog.Info("Shutting down endpoints config controller")
if !controller.WaitForCacheSync("endpoints config", stopCh, c.listerSynced) {
return
}
for i := range c.eventHandlers {
glog.V(3).Infof("Calling handler.OnEndpointsSynced()")
c.eventHandlers[i].OnEndpointsSynced()
}
<-stopCh
}
func (c *EndpointsConfig) handleAddEndpoints(obj interface{}) {
endpoints, ok := obj.(*api.Endpoints)
if !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
return
}
for i := range c.eventHandlers {
glog.V(4).Infof("Calling handler.OnEndpointsAdd")
c.eventHandlers[i].OnEndpointsAdd(endpoints)
}
}
func (c *EndpointsConfig) handleUpdateEndpoints(oldObj, newObj interface{}) {
oldEndpoints, ok := oldObj.(*api.Endpoints)
if !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", oldObj))
return
}
endpoints, ok := newObj.(*api.Endpoints)
if !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", newObj))
return
}
for i := range c.eventHandlers {
glog.V(4).Infof("Calling handler.OnEndpointsUpdate")
c.eventHandlers[i].OnEndpointsUpdate(oldEndpoints, endpoints)
}
}
func (c *EndpointsConfig) handleDeleteEndpoints(obj interface{}) {
endpoints, ok := obj.(*api.Endpoints)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
return
}
if endpoints, ok = tombstone.Obj.(*api.Endpoints); !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
return
}
}
for i := range c.eventHandlers {
glog.V(4).Infof("Calling handler.OnEndpointsDelete")
c.eventHandlers[i].OnEndpointsDelete(endpoints)
}
}
// ServiceConfig tracks a set of service configurations.
// It accepts "set", "add" and "remove" operations of services via channels, and invokes registered handlers on change.
type ServiceConfig struct {
lister listers.ServiceLister
listerSynced cache.InformerSynced
eventHandlers []ServiceHandler
}
// NewServiceConfig creates a new ServiceConfig.
func NewServiceConfig(serviceInformer coreinformers.ServiceInformer, resyncPeriod time.Duration) *ServiceConfig {
result := &ServiceConfig{
lister: serviceInformer.Lister(),
listerSynced: serviceInformer.Informer().HasSynced,
}
serviceInformer.Informer().AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: result.handleAddService,
UpdateFunc: result.handleUpdateService,
DeleteFunc: result.handleDeleteService,
},
resyncPeriod,
)
return result
}
// RegisterEventHandler registers a handler which is called on every service change.
func (c *ServiceConfig) RegisterEventHandler(handler ServiceHandler) {
c.eventHandlers = append(c.eventHandlers, handler)
}
// Run starts the goroutine responsible for calling
// registered handlers.
func (c *ServiceConfig) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.Info("Starting service config controller")
defer glog.Info("Shutting down service config controller")
if !controller.WaitForCacheSync("service config", stopCh, c.listerSynced) {
return
}
for i := range c.eventHandlers {
glog.V(3).Infof("Calling handler.OnServiceSynced()")
c.eventHandlers[i].OnServiceSynced()
}
<-stopCh
}
func (c *ServiceConfig) handleAddService(obj interface{}) {
service, ok := obj.(*api.Service)
if !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
return
}
for i := range c.eventHandlers {
glog.V(4).Infof("Calling handler.OnServiceAdd")
c.eventHandlers[i].OnServiceAdd(service)
}
}
func (c *ServiceConfig) handleUpdateService(oldObj, newObj interface{}) {
oldService, ok := oldObj.(*api.Service)
if !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", oldObj))
return
}
service, ok := newObj.(*api.Service)
if !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", newObj))
return
}
for i := range c.eventHandlers {
glog.V(4).Infof("Calling handler.OnServiceUpdate")
c.eventHandlers[i].OnServiceUpdate(oldService, service)
}
}
func (c *ServiceConfig) handleDeleteService(obj interface{}) {
service, ok := obj.(*api.Service)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
return
}
if service, ok = tombstone.Obj.(*api.Service); !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
return
}
}
for i := range c.eventHandlers {
glog.V(4).Infof("Calling handler.OnServiceDelete")
c.eventHandlers[i].OnServiceDelete(service)
}
}
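Taken together, ServiceConfig and EndpointsConfig follow one pattern: implement the handler interface, register it before calling Run, and let the shared informer feed events. A minimal sketch under those assumptions (the loggingServiceHandler type and wireServiceConfig helper are illustrative, not part of this package):

package config

import (
	"time"

	"github.com/golang/glog"
	api "k8s.io/kubernetes/pkg/apis/core"
	coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion"
)

// loggingServiceHandler is a hypothetical ServiceHandler that only logs events.
type loggingServiceHandler struct{}

func (loggingServiceHandler) OnServiceAdd(svc *api.Service)       { glog.Infof("add %s/%s", svc.Namespace, svc.Name) }
func (loggingServiceHandler) OnServiceUpdate(_, svc *api.Service) { glog.Infof("update %s/%s", svc.Namespace, svc.Name) }
func (loggingServiceHandler) OnServiceDelete(svc *api.Service)    { glog.Infof("delete %s/%s", svc.Namespace, svc.Name) }
func (loggingServiceHandler) OnServiceSynced()                    { glog.Info("initial services synced") }

// wireServiceConfig registers the handler and starts the controller; the
// caller closes stopCh to shut everything down.
func wireServiceConfig(informer coreinformers.ServiceInformer, stopCh <-chan struct{}) {
	cfg := NewServiceConfig(informer, 15*time.Minute)
	cfg.RegisterEventHandler(loggingServiceHandler{}) // must happen before Run
	go cfg.Run(stopCh)
}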

View File

@ -1,435 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"reflect"
"sort"
"sync"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
ktesting "k8s.io/client-go/testing"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
)
type sortedServices []*api.Service
func (s sortedServices) Len() int {
return len(s)
}
func (s sortedServices) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s sortedServices) Less(i, j int) bool {
return s[i].Name < s[j].Name
}
type ServiceHandlerMock struct {
lock sync.Mutex
state map[types.NamespacedName]*api.Service
synced bool
updated chan []*api.Service
process func([]*api.Service)
}
func NewServiceHandlerMock() *ServiceHandlerMock {
shm := &ServiceHandlerMock{
state: make(map[types.NamespacedName]*api.Service),
updated: make(chan []*api.Service, 5),
}
shm.process = func(services []*api.Service) {
shm.updated <- services
}
return shm
}
func (h *ServiceHandlerMock) OnServiceAdd(service *api.Service) {
h.lock.Lock()
defer h.lock.Unlock()
namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
h.state[namespacedName] = service
h.sendServices()
}
func (h *ServiceHandlerMock) OnServiceUpdate(oldService, service *api.Service) {
h.lock.Lock()
defer h.lock.Unlock()
namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
h.state[namespacedName] = service
h.sendServices()
}
func (h *ServiceHandlerMock) OnServiceDelete(service *api.Service) {
h.lock.Lock()
defer h.lock.Unlock()
namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
delete(h.state, namespacedName)
h.sendServices()
}
func (h *ServiceHandlerMock) OnServiceSynced() {
h.lock.Lock()
defer h.lock.Unlock()
h.synced = true
h.sendServices()
}
func (h *ServiceHandlerMock) sendServices() {
if !h.synced {
return
}
services := make([]*api.Service, 0, len(h.state))
for _, svc := range h.state {
services = append(services, svc)
}
sort.Sort(sortedServices(services))
h.process(services)
}
func (h *ServiceHandlerMock) ValidateServices(t *testing.T, expectedServices []*api.Service) {
// We might get 1 or more updates for N service updates, because we
// overwrite older snapshots of services from the producer go-routine
// if the consumer falls behind.
var services []*api.Service
for {
select {
case services = <-h.updated:
if reflect.DeepEqual(services, expectedServices) {
return
}
// Unittests will hard timeout in 5m with a stack trace, prevent that
// and surface a clearer reason for failure.
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("Timed out. Expected %#v, Got %#v", expectedServices, services)
return
}
}
}
type sortedEndpoints []*api.Endpoints
func (s sortedEndpoints) Len() int {
return len(s)
}
func (s sortedEndpoints) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s sortedEndpoints) Less(i, j int) bool {
return s[i].Name < s[j].Name
}
type EndpointsHandlerMock struct {
lock sync.Mutex
state map[types.NamespacedName]*api.Endpoints
synced bool
updated chan []*api.Endpoints
process func([]*api.Endpoints)
}
func NewEndpointsHandlerMock() *EndpointsHandlerMock {
ehm := &EndpointsHandlerMock{
state: make(map[types.NamespacedName]*api.Endpoints),
updated: make(chan []*api.Endpoints, 5),
}
ehm.process = func(endpoints []*api.Endpoints) {
ehm.updated <- endpoints
}
return ehm
}
func (h *EndpointsHandlerMock) OnEndpointsAdd(endpoints *api.Endpoints) {
h.lock.Lock()
defer h.lock.Unlock()
namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
h.state[namespacedName] = endpoints
h.sendEndpoints()
}
func (h *EndpointsHandlerMock) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
h.lock.Lock()
defer h.lock.Unlock()
namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
h.state[namespacedName] = endpoints
h.sendEndpoints()
}
func (h *EndpointsHandlerMock) OnEndpointsDelete(endpoints *api.Endpoints) {
h.lock.Lock()
defer h.lock.Unlock()
namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
delete(h.state, namespacedName)
h.sendEndpoints()
}
func (h *EndpointsHandlerMock) OnEndpointsSynced() {
h.lock.Lock()
defer h.lock.Unlock()
h.synced = true
h.sendEndpoints()
}
func (h *EndpointsHandlerMock) sendEndpoints() {
if !h.synced {
return
}
endpoints := make([]*api.Endpoints, 0, len(h.state))
for _, eps := range h.state {
endpoints = append(endpoints, eps)
}
sort.Sort(sortedEndpoints(endpoints))
h.process(endpoints)
}
func (h *EndpointsHandlerMock) ValidateEndpoints(t *testing.T, expectedEndpoints []*api.Endpoints) {
// We might get 1 or more updates for N endpoint updates, because we
// overwrite older snapshots of endpoints from the producer go-routine
// if the consumer falls behind. Unittests will hard timeout in 5m.
var endpoints []*api.Endpoints
for {
select {
case endpoints = <-h.updated:
if reflect.DeepEqual(endpoints, expectedEndpoints) {
return
}
// Unittests will hard timeout in 5m with a stack trace, prevent that
// and surface a clearer reason for failure.
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("Timed out. Expected %#v, Got %#v", expectedEndpoints, endpoints)
return
}
}
}
func TestNewServiceAddedAndNotified(t *testing.T) {
client := fake.NewSimpleClientset()
fakeWatch := watch.NewFake()
client.PrependWatchReactor("services", ktesting.DefaultWatchReactor(fakeWatch, nil))
stopCh := make(chan struct{})
defer close(stopCh)
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
config := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), time.Minute)
handler := NewServiceHandlerMock()
config.RegisterEventHandler(handler)
go sharedInformers.Start(stopCh)
go config.Run(stopCh)
service := &api.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
}
fakeWatch.Add(service)
handler.ValidateServices(t, []*api.Service{service})
}
func TestServiceAddedRemovedSetAndNotified(t *testing.T) {
client := fake.NewSimpleClientset()
fakeWatch := watch.NewFake()
client.PrependWatchReactor("services", ktesting.DefaultWatchReactor(fakeWatch, nil))
stopCh := make(chan struct{})
defer close(stopCh)
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
config := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), time.Minute)
handler := NewServiceHandlerMock()
config.RegisterEventHandler(handler)
go sharedInformers.Start(stopCh)
go config.Run(stopCh)
service1 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
}
fakeWatch.Add(service1)
handler.ValidateServices(t, []*api.Service{service1})
service2 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 20}}},
}
fakeWatch.Add(service2)
services := []*api.Service{service2, service1}
handler.ValidateServices(t, services)
fakeWatch.Delete(service1)
services = []*api.Service{service2}
handler.ValidateServices(t, services)
}
func TestNewServicesMultipleHandlersAddedAndNotified(t *testing.T) {
client := fake.NewSimpleClientset()
fakeWatch := watch.NewFake()
client.PrependWatchReactor("services", ktesting.DefaultWatchReactor(fakeWatch, nil))
stopCh := make(chan struct{})
defer close(stopCh)
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
config := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), time.Minute)
handler := NewServiceHandlerMock()
handler2 := NewServiceHandlerMock()
config.RegisterEventHandler(handler)
config.RegisterEventHandler(handler2)
go sharedInformers.Start(stopCh)
go config.Run(stopCh)
service1 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
}
service2 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 20}}},
}
fakeWatch.Add(service1)
fakeWatch.Add(service2)
services := []*api.Service{service2, service1}
handler.ValidateServices(t, services)
handler2.ValidateServices(t, services)
}
func TestNewEndpointsMultipleHandlersAddedAndNotified(t *testing.T) {
client := fake.NewSimpleClientset()
fakeWatch := watch.NewFake()
client.PrependWatchReactor("endpoints", ktesting.DefaultWatchReactor(fakeWatch, nil))
stopCh := make(chan struct{})
defer close(stopCh)
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
config := NewEndpointsConfig(sharedInformers.Core().InternalVersion().Endpoints(), time.Minute)
handler := NewEndpointsHandlerMock()
handler2 := NewEndpointsHandlerMock()
config.RegisterEventHandler(handler)
config.RegisterEventHandler(handler2)
go sharedInformers.Start(stopCh)
go config.Run(stopCh)
endpoints1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.1.1.1"}, {IP: "2.2.2.2"}},
Ports: []api.EndpointPort{{Port: 80}},
}},
}
endpoints2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "3.3.3.3"}, {IP: "4.4.4.4"}},
Ports: []api.EndpointPort{{Port: 80}},
}},
}
fakeWatch.Add(endpoints1)
fakeWatch.Add(endpoints2)
endpoints := []*api.Endpoints{endpoints2, endpoints1}
handler.ValidateEndpoints(t, endpoints)
handler2.ValidateEndpoints(t, endpoints)
}
func TestNewEndpointsMultipleHandlersAddRemoveSetAndNotified(t *testing.T) {
client := fake.NewSimpleClientset()
fakeWatch := watch.NewFake()
client.PrependWatchReactor("endpoints", ktesting.DefaultWatchReactor(fakeWatch, nil))
stopCh := make(chan struct{})
defer close(stopCh)
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
config := NewEndpointsConfig(sharedInformers.Core().InternalVersion().Endpoints(), time.Minute)
handler := NewEndpointsHandlerMock()
handler2 := NewEndpointsHandlerMock()
config.RegisterEventHandler(handler)
config.RegisterEventHandler(handler2)
go sharedInformers.Start(stopCh)
go config.Run(stopCh)
endpoints1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.1.1.1"}, {IP: "2.2.2.2"}},
Ports: []api.EndpointPort{{Port: 80}},
}},
}
endpoints2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "3.3.3.3"}, {IP: "4.4.4.4"}},
Ports: []api.EndpointPort{{Port: 80}},
}},
}
fakeWatch.Add(endpoints1)
fakeWatch.Add(endpoints2)
endpoints := []*api.Endpoints{endpoints2, endpoints1}
handler.ValidateEndpoints(t, endpoints)
handler2.ValidateEndpoints(t, endpoints)
// Add one more
endpoints3 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foobar"},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "5.5.5.5"}, {IP: "6.6.6.6"}},
Ports: []api.EndpointPort{{Port: 80}},
}},
}
fakeWatch.Add(endpoints3)
endpoints = []*api.Endpoints{endpoints2, endpoints1, endpoints3}
handler.ValidateEndpoints(t, endpoints)
handler2.ValidateEndpoints(t, endpoints)
// Update the "foo" service with new endpoints
endpoints1v2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "7.7.7.7"}},
Ports: []api.EndpointPort{{Port: 80}},
}},
}
fakeWatch.Modify(endpoints1v2)
endpoints = []*api.Endpoints{endpoints2, endpoints1v2, endpoints3}
handler.ValidateEndpoints(t, endpoints)
handler2.ValidateEndpoints(t, endpoints)
// Remove "bar" endpoints
fakeWatch.Delete(endpoints2)
endpoints = []*api.Endpoints{endpoints1v2, endpoints3}
handler.ValidateEndpoints(t, endpoints)
handler2.ValidateEndpoints(t, endpoints)
}
// TODO: Add a unittest for interrupts getting processed in a timely manner.
// Currently this module has a circular dependency with config, and so it's
// named config_test, which means even test methods need to be public. This
// is something we could avoid by refactoring to resolve the dependency.

View File

@ -1,25 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package config provides decoupling between various configuration sources (etcd, files,...) and
// the pieces that actually care about them (loadbalancer, proxy). Config takes 1 or more
// configuration sources and allows for incremental (add/remove) and full replace (set)
// changes from each of the sources, then creates a union of the configuration and provides
// a unified view for both service handlers as well as endpoint handlers. There is no attempt
// to resolve conflicts of any sort. Basic idea is that each configuration source gets a channel
// from the Config service and pushes updates to it via that channel. Config then keeps track of
// incremental & replace changes and distributes them to listeners as appropriate.
package config // import "k8s.io/kubernetes/pkg/proxy/config"

View File

@ -1,18 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package proxy implements the layer-3 network proxy.
package proxy // import "k8s.io/kubernetes/pkg/proxy"

View File

@ -1,314 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"net"
"reflect"
"strconv"
"sync"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
api "k8s.io/kubernetes/pkg/apis/core"
utilproxy "k8s.io/kubernetes/pkg/proxy/util"
utilnet "k8s.io/kubernetes/pkg/util/net"
)
// BaseEndpointInfo contains base information that defines an endpoint.
// This could be used directly by proxier while processing endpoints,
// or can be used for constructing a more specific EndpointInfo struct
// defined by the proxier if needed.
type BaseEndpointInfo struct {
Endpoint string // TODO: should be an endpointString type
// IsLocal indicates whether the endpoint is running in same host as kube-proxy.
IsLocal bool
}
var _ Endpoint = &BaseEndpointInfo{}
// String is part of proxy.Endpoint interface.
func (info *BaseEndpointInfo) String() string {
return info.Endpoint
}
// GetIsLocal is part of proxy.Endpoint interface.
func (info *BaseEndpointInfo) GetIsLocal() bool {
return info.IsLocal
}
// IP returns just the IP part of the endpoint; it is part of the proxy.Endpoint interface.
func (info *BaseEndpointInfo) IP() string {
return utilproxy.IPPart(info.Endpoint)
}
// Port returns just the Port part of the endpoint.
func (info *BaseEndpointInfo) Port() (int, error) {
return utilproxy.PortPart(info.Endpoint)
}
// Equal is part of proxy.Endpoint interface.
func (info *BaseEndpointInfo) Equal(other Endpoint) bool {
return info.String() == other.String() && info.GetIsLocal() == other.GetIsLocal()
}
func newBaseEndpointInfo(IP string, port int, isLocal bool) *BaseEndpointInfo {
return &BaseEndpointInfo{
Endpoint: net.JoinHostPort(IP, strconv.Itoa(port)),
IsLocal: isLocal,
}
}
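// Illustratively (not part of the original file): newBaseEndpointInfo("10.1.2.3", 8080, true)
// returns a BaseEndpointInfo whose String() is "10.1.2.3:8080", whose IP() is
// "10.1.2.3", and whose Port() is (8080, nil).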
type makeEndpointFunc func(info *BaseEndpointInfo) Endpoint
// EndpointChangeTracker carries state about uncommitted changes to an arbitrary number of
// Endpoints, keyed by their namespace and name.
type EndpointChangeTracker struct {
// lock protects items.
lock sync.Mutex
// hostname is the host where kube-proxy is running.
hostname string
// items maps a service to its endpointsChange.
items map[types.NamespacedName]*endpointsChange
// makeEndpointInfo allows the proxier to inject customized information when processing an endpoint.
makeEndpointInfo makeEndpointFunc
// isIPv6Mode indicates whether the change tracker is in IPv6 or IPv4 mode. Nil means not applicable.
isIPv6Mode *bool
recorder record.EventRecorder
}
// NewEndpointChangeTracker initializes an EndpointChangeTracker
func NewEndpointChangeTracker(hostname string, makeEndpointInfo makeEndpointFunc, isIPv6Mode *bool, recorder record.EventRecorder) *EndpointChangeTracker {
return &EndpointChangeTracker{
hostname: hostname,
items: make(map[types.NamespacedName]*endpointsChange),
makeEndpointInfo: makeEndpointInfo,
isIPv6Mode: isIPv6Mode,
recorder: recorder,
}
}
// Update updates the given service's endpoints change map based on the <previous, current> endpoints pair. It returns true
// if items changed; otherwise it returns false. Update can be used to add/update/delete items of the change tracker. For example,
// Add item
// - pass <nil, endpoints> as the <previous, current> pair.
// Update item
// - pass <oldEndpoints, endpoints> as the <previous, current> pair.
// Delete item
// - pass <endpoints, nil> as the <previous, current> pair.
func (ect *EndpointChangeTracker) Update(previous, current *api.Endpoints) bool {
endpoints := current
if endpoints == nil {
endpoints = previous
}
// previous == nil && current == nil is unexpected, we should return false directly.
if endpoints == nil {
return false
}
namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
ect.lock.Lock()
defer ect.lock.Unlock()
change, exists := ect.items[namespacedName]
if !exists {
change = &endpointsChange{}
change.previous = ect.endpointsToEndpointsMap(previous)
ect.items[namespacedName] = change
}
change.current = ect.endpointsToEndpointsMap(current)
// if change.previous is equal to change.current, there is no change
if reflect.DeepEqual(change.previous, change.current) {
delete(ect.items, namespacedName)
}
return len(ect.items) > 0
}
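// Illustratively (not part of the original file), a caller drives the three
// cases like so, where ect is an *EndpointChangeTracker fed by the event
// handlers in pkg/proxy/config:
//
//	pending := ect.Update(nil, eps)      // add:    <nil, current>
//	pending = ect.Update(oldEps, newEps) // update: <previous, current>
//	pending = ect.Update(eps, nil)       // delete: <previous, nil>
//
// The boolean reports whether any uncommitted changes are still pending.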
// endpointsChange contains all changes to endpoints that happened since proxy rules were synced. For a single object,
// changes are accumulated, i.e. previous is state from before applying the changes,
// current is state after applying the changes.
type endpointsChange struct {
previous EndpointsMap
current EndpointsMap
}
// UpdateEndpointMapResult is the updated results after applying endpoints changes.
type UpdateEndpointMapResult struct {
// HCEndpointsLocalIPSize maps an endpoints name to the length of its local IPs.
HCEndpointsLocalIPSize map[types.NamespacedName]int
// StaleEndpoints identifies if an endpoints service pair is stale.
StaleEndpoints []ServiceEndpoint
// StaleServiceNames identifies if a service is stale.
StaleServiceNames []ServicePortName
}
// UpdateEndpointsMap updates endpointsMap based on the given changes.
func UpdateEndpointsMap(endpointsMap EndpointsMap, changes *EndpointChangeTracker) (result UpdateEndpointMapResult) {
result.StaleEndpoints = make([]ServiceEndpoint, 0)
result.StaleServiceNames = make([]ServicePortName, 0)
endpointsMap.apply(changes, &result.StaleEndpoints, &result.StaleServiceNames)
// TODO: If this will appear to be computationally expensive, consider
// computing this incrementally similarly to endpointsMap.
result.HCEndpointsLocalIPSize = make(map[types.NamespacedName]int)
localIPs := GetLocalEndpointIPs(endpointsMap)
for nsn, ips := range localIPs {
result.HCEndpointsLocalIPSize[nsn] = len(ips)
}
return result
}
// EndpointsMap maps a service name to a list of all its Endpoints.
type EndpointsMap map[ServicePortName][]Endpoint
// endpointsToEndpointsMap translates a single Endpoints object to an EndpointsMap.
// This function is used for incremental updates of endpointsMap.
//
// NOTE: endpoints object should NOT be modified.
func (ect *EndpointChangeTracker) endpointsToEndpointsMap(endpoints *api.Endpoints) EndpointsMap {
if endpoints == nil {
return nil
}
endpointsMap := make(EndpointsMap)
// We need to build a map of portname -> all ip:ports for that
// portname. Explode Endpoints.Subsets[*] into this structure.
for i := range endpoints.Subsets {
ss := &endpoints.Subsets[i]
for i := range ss.Ports {
port := &ss.Ports[i]
if port.Port == 0 {
glog.Warningf("ignoring invalid endpoint port %s", port.Name)
continue
}
svcPortName := ServicePortName{
NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name},
Port: port.Name,
}
for i := range ss.Addresses {
addr := &ss.Addresses[i]
if addr.IP == "" {
glog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name)
continue
}
// Filter out the incorrect IP version case.
// Any endpoint port that contains incorrect IP version will be ignored.
if ect.isIPv6Mode != nil && utilnet.IsIPv6String(addr.IP) != *ect.isIPv6Mode {
// Emit event on the corresponding service which had a different
// IP version than the endpoint.
utilproxy.LogAndEmitIncorrectIPVersionEvent(ect.recorder, "endpoints", addr.IP, endpoints.Name, endpoints.Namespace, "")
continue
}
isLocal := addr.NodeName != nil && *addr.NodeName == ect.hostname
baseEndpointInfo := newBaseEndpointInfo(addr.IP, int(port.Port), isLocal)
if ect.makeEndpointInfo != nil {
endpointsMap[svcPortName] = append(endpointsMap[svcPortName], ect.makeEndpointInfo(baseEndpointInfo))
} else {
endpointsMap[svcPortName] = append(endpointsMap[svcPortName], baseEndpointInfo)
}
}
if glog.V(3) {
newEPList := []string{}
for _, ep := range endpointsMap[svcPortName] {
newEPList = append(newEPList, ep.String())
}
glog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList)
}
}
}
return endpointsMap
}
// apply applies the changes to the EndpointsMap and updates the stale endpoints and stale service-endpoints pairs. The `staleEndpoints` argument
// is passed in to store the stale UDP endpoints, and the `staleServiceNames` argument is passed in to store the stale UDP services.
// The changes map is cleared after applying them.
func (endpointsMap EndpointsMap) apply(changes *EndpointChangeTracker, staleEndpoints *[]ServiceEndpoint, staleServiceNames *[]ServicePortName) {
if changes == nil {
return
}
changes.lock.Lock()
defer changes.lock.Unlock()
for _, change := range changes.items {
endpointsMap.Unmerge(change.previous)
endpointsMap.Merge(change.current)
detectStaleConnections(change.previous, change.current, staleEndpoints, staleServiceNames)
}
changes.items = make(map[types.NamespacedName]*endpointsChange)
}
// Merge ensures that the current EndpointsMap contains all <service, endpoints> pairs from the EndpointsMap passed in.
func (em EndpointsMap) Merge(other EndpointsMap) {
for svcPortName := range other {
em[svcPortName] = other[svcPortName]
}
}
// Unmerge removes the <service, endpoints> pairs from the current EndpointsMap which are contained in the EndpointsMap passed in.
func (em EndpointsMap) Unmerge(other EndpointsMap) {
for svcPortName := range other {
delete(em, svcPortName)
}
}
// GetLocalEndpointIPs returns endpoints IPs if given endpoint is local - local means the endpoint is running in same host as kube-proxy.
func GetLocalEndpointIPs(endpointsMap EndpointsMap) map[types.NamespacedName]sets.String {
localIPs := make(map[types.NamespacedName]sets.String)
for svcPortName, epList := range endpointsMap {
for _, ep := range epList {
if ep.GetIsLocal() {
nsn := svcPortName.NamespacedName
if localIPs[nsn] == nil {
localIPs[nsn] = sets.NewString()
}
localIPs[nsn].Insert(ep.IP())
}
}
}
return localIPs
}
// detectStaleConnections modifies <staleEndpoints> and <staleServiceNames> with detected stale connections. <staleServiceNames>
// is used to store stale UDP services in order to clear UDP conntrack entries later.
func detectStaleConnections(oldEndpointsMap, newEndpointsMap EndpointsMap, staleEndpoints *[]ServiceEndpoint, staleServiceNames *[]ServicePortName) {
for svcPortName, epList := range oldEndpointsMap {
for _, ep := range epList {
stale := true
for i := range newEndpointsMap[svcPortName] {
if newEndpointsMap[svcPortName][i].Equal(ep) {
stale = false
break
}
}
if stale {
glog.V(4).Infof("Stale endpoint %v -> %v", svcPortName, ep.String())
*staleEndpoints = append(*staleEndpoints, ServiceEndpoint{Endpoint: ep.String(), ServicePortName: svcPortName})
}
}
}
for svcPortName, epList := range newEndpointsMap {
// For a UDP service, if its backend count changes from 0 to non-0, there may exist a conntrack entry that could blackhole traffic to the service.
if len(epList) > 0 && len(oldEndpointsMap[svcPortName]) == 0 {
*staleServiceNames = append(*staleServiceNames, svcPortName)
}
}
}
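The file as a whole defines a two-phase flow: Update accumulates pending changes under the tracker's lock, and UpdateEndpointsMap commits them into the live map and reports stale UDP state. A minimal sketch of one sync iteration, assuming a caller that already receives <old, current> pairs from the event handlers (the syncOnce helper is illustrative, not part of this package):

package proxy

import (
	"github.com/golang/glog"
	api "k8s.io/kubernetes/pkg/apis/core"
)

// syncOnce is a hypothetical proxier sync step: fold one endpoints event into
// the tracker, then commit all pending changes into the live EndpointsMap.
func syncOnce(ect *EndpointChangeTracker, em EndpointsMap, old, cur *api.Endpoints) {
	if !ect.Update(old, cur) {
		return // nothing pending, skip the commit
	}
	result := UpdateEndpointsMap(em, ect)
	// A real proxier would clear UDP conntrack entries for these two lists.
	for _, se := range result.StaleEndpoints {
		glog.V(4).Infof("stale endpoint %v -> %v", se.ServicePortName, se.Endpoint)
	}
	for _, spn := range result.StaleServiceNames {
		glog.V(4).Infof("stale service %v", spn)
	}
}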

File diff suppressed because it is too large

View File

@ -1,51 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"healthcheck.go",
],
importpath = "k8s.io/kubernetes/pkg/proxy/healthcheck",
deps = [
"//pkg/apis/core:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/renstrom/dedent:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["healthcheck_test.go"],
embed = [":go_default_library"],
deps = [
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,2 +0,0 @@
reviewers:
- m1093782566

View File

@ -1,18 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package healthcheck provides tools for serving kube-proxy healthchecks.
package healthcheck // import "k8s.io/kubernetes/pkg/proxy/healthcheck"

View File

@ -1,347 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package healthcheck
import (
"fmt"
"net"
"net/http"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/golang/glog"
"github.com/renstrom/dedent"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
api "k8s.io/kubernetes/pkg/apis/core"
)
var nodeHealthzRetryInterval = 60 * time.Second
// Server serves HTTP endpoints for each service name, with results
// based on the endpoints. If there are 0 endpoints for a service, it returns a
// 503 "Service Unavailable" error (telling LBs not to use this node). If there
// are 1 or more endpoints, it returns a 200 "OK".
type Server interface {
// Make the new set of services be active. Services that were open before
// will be closed. Services that are new will be opened. Services that
// existed and are in the new set will be left alone. The value of the map
// is the healthcheck-port to listen on.
SyncServices(newServices map[types.NamespacedName]uint16) error
// Make the new set of endpoints be active. Endpoints for services that do
// not exist will be dropped. The value of the map is the number of
// endpoints the service has on this node.
SyncEndpoints(newEndpoints map[types.NamespacedName]int) error
}
// Listener allows for testing of Server. If the Listener argument
// to NewServer() is nil, the real net.Listen function will be used.
type Listener interface {
// Listen is very much like net.Listen, except the first arg (network) is
// fixed to be "tcp".
Listen(addr string) (net.Listener, error)
}
// HTTPServerFactory allows for testing of Server. If the
// HTTPServerFactory argument to NewServer() is nil, the real
// http.Server type will be used.
type HTTPServerFactory interface {
// New creates an instance of a type satisfying HTTPServer. This is
// designed to include http.Server.
New(addr string, handler http.Handler) HTTPServer
}
// HTTPServer allows for testing of Server.
type HTTPServer interface {
// Server is designed so that http.Server satisfies this interface.
Serve(listener net.Listener) error
}
// NewServer allocates a new healthcheck server manager. If either
// of the injected arguments is nil, defaults will be used.
func NewServer(hostname string, recorder record.EventRecorder, listener Listener, httpServerFactory HTTPServerFactory) Server {
if listener == nil {
listener = stdNetListener{}
}
if httpServerFactory == nil {
httpServerFactory = stdHTTPServerFactory{}
}
return &server{
hostname: hostname,
recorder: recorder,
listener: listener,
httpFactory: httpServerFactory,
services: map[types.NamespacedName]*hcInstance{},
}
}
// Implement Listener in terms of net.Listen.
type stdNetListener struct{}
func (stdNetListener) Listen(addr string) (net.Listener, error) {
return net.Listen("tcp", addr)
}
var _ Listener = stdNetListener{}
// Implement HTTPServerFactory in terms of http.Server.
type stdHTTPServerFactory struct{}
func (stdHTTPServerFactory) New(addr string, handler http.Handler) HTTPServer {
return &http.Server{
Addr: addr,
Handler: handler,
}
}
var _ HTTPServerFactory = stdHTTPServerFactory{}
type server struct {
hostname string
recorder record.EventRecorder // can be nil
listener Listener
httpFactory HTTPServerFactory
lock sync.Mutex
services map[types.NamespacedName]*hcInstance
}
func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) error {
hcs.lock.Lock()
defer hcs.lock.Unlock()
// Remove any that are not needed any more.
for nsn, svc := range hcs.services {
if port, found := newServices[nsn]; !found || port != svc.port {
glog.V(2).Infof("Closing healthcheck %q on port %d", nsn.String(), svc.port)
if err := svc.listener.Close(); err != nil {
glog.Errorf("Close(%v): %v", svc.listener.Addr(), err)
}
delete(hcs.services, nsn)
}
}
// Add any that are needed.
for nsn, port := range newServices {
if hcs.services[nsn] != nil {
glog.V(3).Infof("Existing healthcheck %q on port %d", nsn.String(), port)
continue
}
glog.V(2).Infof("Opening healthcheck %q on port %d", nsn.String(), port)
svc := &hcInstance{port: port}
addr := fmt.Sprintf(":%d", port)
svc.server = hcs.httpFactory.New(addr, hcHandler{name: nsn, hcs: hcs})
var err error
svc.listener, err = hcs.listener.Listen(addr)
if err != nil {
msg := fmt.Sprintf("node %s failed to start healthcheck %q on port %d: %v", hcs.hostname, nsn.String(), port, err)
if hcs.recorder != nil {
hcs.recorder.Eventf(
&v1.ObjectReference{
Kind: "Service",
Namespace: nsn.Namespace,
Name: nsn.Name,
UID: types.UID(nsn.String()),
}, api.EventTypeWarning, "FailedToStartServiceHealthcheck", msg)
}
glog.Error(msg)
continue
}
hcs.services[nsn] = svc
go func(nsn types.NamespacedName, svc *hcInstance) {
// Serve() will exit when the listener is closed.
glog.V(3).Infof("Starting goroutine for healthcheck %q on port %d", nsn.String(), svc.port)
if err := svc.server.Serve(svc.listener); err != nil {
glog.V(3).Infof("Healthcheck %q closed: %v", nsn.String(), err)
return
}
glog.V(3).Infof("Healthcheck %q closed", nsn.String())
}(nsn, svc)
}
return nil
}
type hcInstance struct {
port uint16
listener net.Listener
server HTTPServer
endpoints int // number of local endpoints for a service
}
type hcHandler struct {
name types.NamespacedName
hcs *server
}
var _ http.Handler = hcHandler{}
func (h hcHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
h.hcs.lock.Lock()
svc, ok := h.hcs.services[h.name]
if !ok || svc == nil {
h.hcs.lock.Unlock()
glog.Errorf("Received request for closed healthcheck %q", h.name.String())
return
}
count := svc.endpoints
h.hcs.lock.Unlock()
resp.Header().Set("Content-Type", "application/json")
if count == 0 {
resp.WriteHeader(http.StatusServiceUnavailable)
} else {
resp.WriteHeader(http.StatusOK)
}
fmt.Fprint(resp, strings.Trim(dedent.Dedent(fmt.Sprintf(`
{
"service": {
"namespace": %q,
"name": %q
},
"localEndpoints": %d
}
`, h.name.Namespace, h.name.Name, count)), "\n"))
}
func (hcs *server) SyncEndpoints(newEndpoints map[types.NamespacedName]int) error {
hcs.lock.Lock()
defer hcs.lock.Unlock()
for nsn, count := range newEndpoints {
if hcs.services[nsn] == nil {
glog.V(3).Infof("Not saving endpoints for unknown healthcheck %q", nsn.String())
continue
}
glog.V(3).Infof("Reporting %d endpoints for healthcheck %q", count, nsn.String())
hcs.services[nsn].endpoints = count
}
for nsn, hci := range hcs.services {
if _, found := newEndpoints[nsn]; !found {
hci.endpoints = 0
}
}
return nil
}
// HealthzUpdater allows callers to update healthz timestamp only.
type HealthzUpdater interface {
UpdateTimestamp()
}
// HealthzServer returns 200 "OK" by default. Once the timestamp has been
// updated, it verifies that the time since the last update does not
// exceed the maximum no-response duration.
type HealthzServer struct {
listener Listener
httpFactory HTTPServerFactory
clock clock.Clock
addr string
port int32
healthTimeout time.Duration
recorder record.EventRecorder
nodeRef *v1.ObjectReference
lastUpdated atomic.Value
}
// NewDefaultHealthzServer returns a default healthz http server.
func NewDefaultHealthzServer(addr string, healthTimeout time.Duration, recorder record.EventRecorder, nodeRef *v1.ObjectReference) *HealthzServer {
return newHealthzServer(nil, nil, nil, addr, healthTimeout, recorder, nodeRef)
}
func newHealthzServer(listener Listener, httpServerFactory HTTPServerFactory, c clock.Clock, addr string, healthTimeout time.Duration, recorder record.EventRecorder, nodeRef *v1.ObjectReference) *HealthzServer {
if listener == nil {
listener = stdNetListener{}
}
if httpServerFactory == nil {
httpServerFactory = stdHTTPServerFactory{}
}
if c == nil {
c = clock.RealClock{}
}
return &HealthzServer{
listener: listener,
httpFactory: httpServerFactory,
clock: c,
addr: addr,
healthTimeout: healthTimeout,
recorder: recorder,
nodeRef: nodeRef,
}
}
// UpdateTimestamp updates the lastUpdated timestamp.
func (hs *HealthzServer) UpdateTimestamp() {
hs.lastUpdated.Store(hs.clock.Now())
}
// Run starts the healthz http server and returns.
func (hs *HealthzServer) Run() {
serveMux := http.NewServeMux()
serveMux.Handle("/healthz", healthzHandler{hs: hs})
server := hs.httpFactory.New(hs.addr, serveMux)
go wait.Until(func() {
glog.V(3).Infof("Starting goroutine for healthz on %s", hs.addr)
listener, err := hs.listener.Listen(hs.addr)
if err != nil {
msg := fmt.Sprintf("Failed to start node healthz on %s: %v", hs.addr, err)
if hs.recorder != nil {
hs.recorder.Eventf(hs.nodeRef, api.EventTypeWarning, "FailedToStartNodeHealthcheck", msg)
}
glog.Error(msg)
return
}
if err := server.Serve(listener); err != nil {
glog.Errorf("Healthz closed with error: %v", err)
return
}
glog.Errorf("Unexpected healthz closed.")
}, nodeHealthzRetryInterval, wait.NeverStop)
}
type healthzHandler struct {
hs *HealthzServer
}
func (h healthzHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
lastUpdated := time.Time{}
if val := h.hs.lastUpdated.Load(); val != nil {
lastUpdated = val.(time.Time)
}
currentTime := h.hs.clock.Now()
resp.Header().Set("Content-Type", "application/json")
if !lastUpdated.IsZero() && currentTime.After(lastUpdated.Add(h.hs.healthTimeout)) {
resp.WriteHeader(http.StatusServiceUnavailable)
} else {
resp.WriteHeader(http.StatusOK)
}
fmt.Fprintf(resp, `{"lastUpdated": %q,"currentTime": %q}`, lastUpdated, currentTime)
}
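The two servers are driven differently: the per-service healthcheck server is handed the desired state on every proxy sync, while the node-level healthz server only needs its timestamp refreshed. A minimal sketch under those assumptions (driveHealthchecks is illustrative; the hostname, namespace, ports, and timeout are made up):

package healthcheck

import (
	"time"

	"k8s.io/apimachinery/pkg/types"
)

// driveHealthchecks is a hypothetical sync step showing how a proxier would
// feed both servers; real callers also pass an event recorder and node ref.
func driveHealthchecks() {
	// Per-service healthchecks: one HTTP listener per healthcheck port.
	hcs := NewServer("node-1", nil, nil, nil) // nil deps fall back to net/http
	nsn := types.NamespacedName{Namespace: "default", Name: "web"}
	hcs.SyncServices(map[types.NamespacedName]uint16{nsn: 30303})
	hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 2}) // 2 local endpoints => 200 OK

	// Node-level healthz: returns 503 once updates stop for healthTimeout.
	hz := NewDefaultHealthzServer("0.0.0.0:10256", 30*time.Second, nil, nil)
	hz.Run()             // serves /healthz in the background
	hz.UpdateTimestamp() // would be called from the proxy sync loop
}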

View File

@ -1,405 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package healthcheck
import (
"encoding/json"
"net"
"net/http"
"net/http/httptest"
"testing"
"time"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/davecgh/go-spew/spew"
)
type fakeListener struct {
openPorts sets.String
}
func newFakeListener() *fakeListener {
return &fakeListener{
openPorts: sets.String{},
}
}
func (fake *fakeListener) hasPort(addr string) bool {
return fake.openPorts.Has(addr)
}
func (fake *fakeListener) Listen(addr string) (net.Listener, error) {
fake.openPorts.Insert(addr)
return &fakeNetListener{
parent: fake,
addr: addr,
}, nil
}
type fakeNetListener struct {
parent *fakeListener
addr string
}
func (fake *fakeNetListener) Accept() (net.Conn, error) {
// Not implemented
return nil, nil
}
func (fake *fakeNetListener) Close() error {
fake.parent.openPorts.Delete(fake.addr)
return nil
}
func (fake *fakeNetListener) Addr() net.Addr {
// Not implemented
return nil
}
type fakeHTTPServerFactory struct{}
func newFakeHTTPServerFactory() *fakeHTTPServerFactory {
return &fakeHTTPServerFactory{}
}
func (fake *fakeHTTPServerFactory) New(addr string, handler http.Handler) HTTPServer {
return &fakeHTTPServer{
addr: addr,
handler: handler,
}
}
type fakeHTTPServer struct {
addr string
handler http.Handler
}
func (fake *fakeHTTPServer) Serve(listener net.Listener) error {
return nil // Cause the goroutine to return
}
func mknsn(ns, name string) types.NamespacedName {
return types.NamespacedName{
Namespace: ns,
Name: name,
}
}
type hcPayload struct {
Service struct {
Namespace string
Name string
}
LocalEndpoints int
}
type healthzPayload struct {
LastUpdated string
CurrentTime string
}
func TestServer(t *testing.T) {
listener := newFakeListener()
httpFactory := newFakeHTTPServerFactory()
hcsi := NewServer("hostname", nil, listener, httpFactory)
hcs := hcsi.(*server)
if len(hcs.services) != 0 {
t.Errorf("expected 0 services, got %d", len(hcs.services))
}
// sync nothing
hcs.SyncServices(nil)
if len(hcs.services) != 0 {
t.Errorf("expected 0 services, got %d", len(hcs.services))
}
hcs.SyncEndpoints(nil)
if len(hcs.services) != 0 {
t.Errorf("expected 0 services, got %d", len(hcs.services))
}
// sync unknown endpoints, should be dropped
hcs.SyncEndpoints(map[types.NamespacedName]int{mknsn("a", "b"): 93})
if len(hcs.services) != 0 {
t.Errorf("expected 0 services, got %d", len(hcs.services))
}
// sync a real service
nsn := mknsn("a", "b")
hcs.SyncServices(map[types.NamespacedName]uint16{nsn: 9376})
if len(hcs.services) != 1 {
t.Errorf("expected 1 service, got %d", len(hcs.services))
}
if hcs.services[nsn].endpoints != 0 {
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn].endpoints)
}
if len(listener.openPorts) != 1 {
t.Errorf("expected 1 open port, got %d\n%s", len(listener.openPorts), spew.Sdump(listener.openPorts))
}
if !listener.hasPort(":9376") {
t.Errorf("expected port :9376 to be open\n%s", spew.Sdump(listener.openPorts))
}
// test the handler
testHandler(hcs, nsn, http.StatusServiceUnavailable, 0, t)
// sync an endpoint
hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 18})
if len(hcs.services) != 1 {
t.Errorf("expected 1 service, got %d", len(hcs.services))
}
if hcs.services[nsn].endpoints != 18 {
t.Errorf("expected 18 endpoints, got %d", hcs.services[nsn].endpoints)
}
// test the handler
testHandler(hcs, nsn, http.StatusOK, 18, t)
// sync zero endpoints
hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 0})
if len(hcs.services) != 1 {
t.Errorf("expected 1 service, got %d", len(hcs.services))
}
if hcs.services[nsn].endpoints != 0 {
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn].endpoints)
}
// test the handler
testHandler(hcs, nsn, http.StatusServiceUnavailable, 0, t)
// put the endpoint back
hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 11})
if len(hcs.services) != 1 {
t.Errorf("expected 1 service, got %d", len(hcs.services))
}
if hcs.services[nsn].endpoints != 11 {
t.Errorf("expected 18 endpoints, got %d", hcs.services[nsn].endpoints)
}
// sync nil endpoints
hcs.SyncEndpoints(nil)
if len(hcs.services) != 1 {
t.Errorf("expected 1 service, got %d", len(hcs.services))
}
if hcs.services[nsn].endpoints != 0 {
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn].endpoints)
}
// test the handler
testHandler(hcs, nsn, http.StatusServiceUnavailable, 0, t)
// put the endpoint back
hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 18})
if len(hcs.services) != 1 {
t.Errorf("expected 1 service, got %d", len(hcs.services))
}
if hcs.services[nsn].endpoints != 18 {
t.Errorf("expected 18 endpoints, got %d", hcs.services[nsn].endpoints)
}
// delete the service
hcs.SyncServices(nil)
if len(hcs.services) != 0 {
t.Errorf("expected 0 services, got %d", len(hcs.services))
}
// sync multiple services
nsn1 := mknsn("a", "b")
nsn2 := mknsn("c", "d")
nsn3 := mknsn("e", "f")
nsn4 := mknsn("g", "h")
hcs.SyncServices(map[types.NamespacedName]uint16{
nsn1: 9376,
nsn2: 12909,
nsn3: 11113,
})
if len(hcs.services) != 3 {
t.Errorf("expected 3 service, got %d", len(hcs.services))
}
if hcs.services[nsn1].endpoints != 0 {
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn1].endpoints)
}
if hcs.services[nsn2].endpoints != 0 {
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn2].endpoints)
}
if hcs.services[nsn3].endpoints != 0 {
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn3].endpoints)
}
if len(listener.openPorts) != 3 {
t.Errorf("expected 3 open ports, got %d\n%s", len(listener.openPorts), spew.Sdump(listener.openPorts))
}
// test the handlers
testHandler(hcs, nsn1, http.StatusServiceUnavailable, 0, t)
testHandler(hcs, nsn2, http.StatusServiceUnavailable, 0, t)
testHandler(hcs, nsn3, http.StatusServiceUnavailable, 0, t)
// sync endpoints
hcs.SyncEndpoints(map[types.NamespacedName]int{
nsn1: 9,
nsn2: 3,
nsn3: 7,
})
if len(hcs.services) != 3 {
t.Errorf("expected 3 services, got %d", len(hcs.services))
}
if hcs.services[nsn1].endpoints != 9 {
t.Errorf("expected 9 endpoints, got %d", hcs.services[nsn1].endpoints)
}
if hcs.services[nsn2].endpoints != 3 {
t.Errorf("expected 3 endpoints, got %d", hcs.services[nsn2].endpoints)
}
if hcs.services[nsn3].endpoints != 7 {
t.Errorf("expected 7 endpoints, got %d", hcs.services[nsn3].endpoints)
}
// test the handlers
testHandler(hcs, nsn1, http.StatusOK, 9, t)
testHandler(hcs, nsn2, http.StatusOK, 3, t)
testHandler(hcs, nsn3, http.StatusOK, 7, t)
// sync new services
hcs.SyncServices(map[types.NamespacedName]uint16{
//nsn1: 9376, // remove it
nsn2: 12909, // leave it
nsn3: 11114, // change it
nsn4: 11878, // add it
})
if len(hcs.services) != 3 {
t.Errorf("expected 3 service, got %d", len(hcs.services))
}
if hcs.services[nsn2].endpoints != 3 {
t.Errorf("expected 3 endpoints, got %d", hcs.services[nsn2].endpoints)
}
if hcs.services[nsn3].endpoints != 0 {
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn3].endpoints)
}
if hcs.services[nsn4].endpoints != 0 {
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn4].endpoints)
}
// test the handlers
testHandler(hcs, nsn2, http.StatusOK, 3, t)
testHandler(hcs, nsn3, http.StatusServiceUnavailable, 0, t)
testHandler(hcs, nsn4, http.StatusServiceUnavailable, 0, t)
// sync endpoints
hcs.SyncEndpoints(map[types.NamespacedName]int{
nsn1: 9,
nsn2: 3,
nsn3: 7,
nsn4: 6,
})
if len(hcs.services) != 3 {
t.Errorf("expected 3 services, got %d", len(hcs.services))
}
if hcs.services[nsn2].endpoints != 3 {
t.Errorf("expected 3 endpoints, got %d", hcs.services[nsn2].endpoints)
}
if hcs.services[nsn3].endpoints != 7 {
t.Errorf("expected 7 endpoints, got %d", hcs.services[nsn3].endpoints)
}
if hcs.services[nsn4].endpoints != 6 {
t.Errorf("expected 6 endpoints, got %d", hcs.services[nsn4].endpoints)
}
// test the handlers
testHandler(hcs, nsn2, http.StatusOK, 3, t)
testHandler(hcs, nsn3, http.StatusOK, 7, t)
testHandler(hcs, nsn4, http.StatusOK, 6, t)
// sync endpoints, missing nsn2
hcs.SyncEndpoints(map[types.NamespacedName]int{
nsn3: 7,
nsn4: 6,
})
if len(hcs.services) != 3 {
t.Errorf("expected 3 services, got %d", len(hcs.services))
}
if hcs.services[nsn2].endpoints != 0 {
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn2].endpoints)
}
if hcs.services[nsn3].endpoints != 7 {
t.Errorf("expected 7 endpoints, got %d", hcs.services[nsn3].endpoints)
}
if hcs.services[nsn4].endpoints != 6 {
t.Errorf("expected 6 endpoints, got %d", hcs.services[nsn4].endpoints)
}
// test the handlers
testHandler(hcs, nsn2, http.StatusServiceUnavailable, 0, t)
testHandler(hcs, nsn3, http.StatusOK, 7, t)
testHandler(hcs, nsn4, http.StatusOK, 6, t)
}
func testHandler(hcs *server, nsn types.NamespacedName, status int, endpoints int, t *testing.T) {
handler := hcs.services[nsn].server.(*fakeHTTPServer).handler
req, err := http.NewRequest("GET", "/healthz", nil)
if err != nil {
t.Fatal(err)
}
resp := httptest.NewRecorder()
handler.ServeHTTP(resp, req)
if resp.Code != status {
t.Errorf("expected status code %v, got %v", status, resp.Code)
}
var payload hcPayload
if err := json.Unmarshal(resp.Body.Bytes(), &payload); err != nil {
t.Fatal(err)
}
if payload.Service.Name != nsn.Name || payload.Service.Namespace != nsn.Namespace {
t.Errorf("expected payload name %q, got %v", nsn.String(), payload.Service)
}
if payload.LocalEndpoints != endpoints {
t.Errorf("expected %d endpoints, got %d", endpoints, payload.LocalEndpoints)
}
}
func TestHealthzServer(t *testing.T) {
listener := newFakeListener()
httpFactory := newFakeHTTPServerFactory()
fakeClock := clock.NewFakeClock(time.Now())
hs := newHealthzServer(listener, httpFactory, fakeClock, "127.0.0.1:10256", 10*time.Second, nil, nil)
server := hs.httpFactory.New(hs.addr, healthzHandler{hs: hs})
// Should return 200 "OK" by default.
testHealthzHandler(server, http.StatusOK, t)
// Should return 503 "ServiceUnavailable" if the max no-response duration is exceeded.
hs.UpdateTimestamp()
fakeClock.Step(25 * time.Second)
testHealthzHandler(server, http.StatusServiceUnavailable, t)
// Should return 200 "OK" if timestamp is valid.
hs.UpdateTimestamp()
fakeClock.Step(5 * time.Second)
testHealthzHandler(server, http.StatusOK, t)
}
func testHealthzHandler(server HTTPServer, status int, t *testing.T) {
handler := server.(*fakeHTTPServer).handler
req, err := http.NewRequest("GET", "/healthz", nil)
if err != nil {
t.Fatal(err)
}
resp := httptest.NewRecorder()
handler.ServeHTTP(resp, req)
if resp.Code != status {
t.Errorf("expected status code %v, got %v", status, resp.Code)
}
var payload healthzPayload
if err := json.Unmarshal(resp.Body.Bytes(), &payload); err != nil {
t.Fatal(err)
}
}

View File

@ -1,69 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"proxier.go",
],
importpath = "k8s.io/kubernetes/pkg/proxy/iptables",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/healthcheck:go_default_library",
"//pkg/proxy/metrics:go_default_library",
"//pkg/proxy/util:go_default_library",
"//pkg/util/async:go_default_library",
"//pkg/util/conntrack:go_default_library",
"//pkg/util/iptables:go_default_library",
"//pkg/util/net:go_default_library",
"//pkg/util/sysctl:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["proxier_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/util:go_default_library",
"//pkg/proxy/util/testing:go_default_library",
"//pkg/util/async:go_default_library",
"//pkg/util/conntrack:go_default_library",
"//pkg/util/iptables:go_default_library",
"//pkg/util/iptables/testing:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,7 +0,0 @@
reviewers:
- thockin
- smarterclayton
- justinsb
- freehan
- dcbw
- danwinship

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,124 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = [
"ipset_test.go",
"proxier_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/ipvs/testing:go_default_library",
"//pkg/proxy/util:go_default_library",
"//pkg/proxy/util/testing:go_default_library",
"//pkg/util/ipset:go_default_library",
"//pkg/util/ipset/testing:go_default_library",
"//pkg/util/iptables:go_default_library",
"//pkg/util/iptables/testing:go_default_library",
"//pkg/util/ipvs:go_default_library",
"//pkg/util/ipvs/testing:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"ipset.go",
"netlink.go",
"proxier.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"netlink_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"netlink_unsupported.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/proxy/ipvs",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/healthcheck:go_default_library",
"//pkg/proxy/metrics:go_default_library",
"//pkg/proxy/util:go_default_library",
"//pkg/util/async:go_default_library",
"//pkg/util/conntrack:go_default_library",
"//pkg/util/ipset:go_default_library",
"//pkg/util/iptables:go_default_library",
"//pkg/util/ipvs:go_default_library",
"//pkg/util/net:go_default_library",
"//pkg/util/sysctl:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:linux": [
"//vendor/github.com/vishvananda/netlink:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
],
"//conditions:default": [],
}),
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/proxy/ipvs/testing:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,8 +0,0 @@
reviewers:
- thockin
- brendandburns
- m1093782566
approvers:
- thockin
- brendandburns
- m1093782566

View File

@ -1,367 +0,0 @@
- [IPVS](#ipvs)
- [What is IPVS](#what-is-ipvs)
- [IPVS vs. IPTABLES](#ipvs-vs-iptables)
- [When ipvs falls back to iptables](#when-ipvs-falls-back-to-iptables)
- [Run kube-proxy in ipvs mode](#run-kube-proxy-in-ipvs-mode)
- [Prerequisite](#prerequisite)
- [Local UP Cluster](#local-up-cluster)
- [GCE Cluster](#gce-cluster)
- [Cluster Created by Kubeadm](#cluster-created-by-kubeadm)
- [Debug](#debug)
- [Check IPVS proxy rules](#check-ipvs-proxy-rules)
- [Why kube-proxy can't start IPVS mode](#why-kube-proxy-cant-start-ipvs-mode)
# IPVS
This document intends to show users:
- what IPVS is
- the difference between IPVS and IPTABLES
- how to run kube-proxy in ipvs mode, with info on debugging
## What is IPVS
**IPVS (IP Virtual Server)** implements transport-layer load balancing, usually called Layer 4 LAN switching, as part of
the Linux kernel.
IPVS runs on a host and acts as a load balancer in front of a cluster of real servers. IPVS can direct requests for TCP
and UDP-based services to the real servers, and make services of real servers appear as virtual services on a single IP address.
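As an illustrative sketch (the addresses here are hypothetical, not taken from a real cluster), a virtual server and a real server behind it can be created directly with the `ipvsadm` tool:
```shell
# add a TCP virtual server on 10.254.0.1:443 with round-robin scheduling
ipvsadm -A -t 10.254.0.1:443 -s rr
# add a real server behind it, forwarding via masquerading (NAT)
ipvsadm -a -t 10.254.0.1:443 -r 192.168.0.1:6443 -m
```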
## IPVS vs. IPTABLES
IPVS mode was introduced in Kubernetes v1.8 and went beta in v1.9. IPTABLES mode was added in v1.1 and became the default operating mode in v1.2. Both IPVS and IPTABLES are based on `netfilter`.
Differences between IPVS mode and IPTABLES mode are as follows:
1. IPVS provides better scalability and performance for large clusters.
2. IPVS supports more sophisticated load balancing algorithms than iptables (least load, least connections, locality, weighted, etc.).
3. IPVS supports server health checking and connection retries, etc.
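For example, assuming kube-proxy is started by hand, the load-balancing algorithm can be selected with its `--ipvs-scheduler` flag (a sketch; `rr` is used when the flag is unset):
```shell
kube-proxy --proxy-mode=ipvs --ipvs-scheduler=wlc
```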
### When ipvs falls back to iptables
The IPVS proxier employs iptables for packet filtering, SNAT, and supporting NodePort-type services. Specifically, the ipvs proxier falls back on iptables in the following 4 scenarios.
**1. kube-proxy starts with --masquerade-all=true**
If kube-proxy starts with `--masquerade-all=true`, the ipvs proxier will masquerade all traffic accessing a service Cluster IP, behaving the same as the iptables proxier. Suppose there is a service with Cluster IP `10.244.5.1` and port `8080`; the iptables rules installed by the ipvs proxier should look like what is shown below.
```shell
# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
Chain KUBE-MARK-DROP (0 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x8000
Chain KUBE-MARK-MASQ (6 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-MARK-MASQ tcp -- 0.0.0.0/0 10.244.5.1 /* default/foo:http cluster IP */ tcp dpt:8080
```
**2. Specify cluster CIDR in kube-proxy startup**
If kube-proxy starts with `--cluster-cidr=<cidr>`, the ipvs proxier will masquerade off-cluster traffic accessing a service Cluster IP, behaving the same as the iptables proxier. Suppose kube-proxy is provided with the cluster cidr `10.244.16.0/24`, and the service Cluster IP is `10.244.5.1` with port `8080`; the iptables rules installed by the ipvs proxier should look like what is shown below.
```shell
# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
Chain KUBE-MARK-DROP (0 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x8000
Chain KUBE-MARK-MASQ (6 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-MARK-MASQ tcp -- !10.244.16.0/24 10.244.5.1 /* default/foo:http cluster IP */ tcp dpt:8080
```
**3. Load Balancer Source Ranges is specified for LB type service**
When a service's `LoadBalancerStatus.ingress.IP` is not empty and its `LoadBalancerSourceRanges` is specified, the ipvs proxier installs iptables rules like those shown below.
Suppose the service's `LoadBalancerStatus.ingress.IP` is `10.96.1.2` and its `LoadBalancerSourceRanges` is `10.120.2.0/24`.
```shell
# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
Chain KUBE-MARK-DROP (0 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x8000
Chain KUBE-MARK-MASQ (6 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-SERVICES (2 references)
target prot opt source destination
ACCEPT tcp -- 10.120.2.0/24 10.96.1.2 /* default/foo:http loadbalancer IP */ tcp dpt:8080
DROP tcp -- 0.0.0.0/0 10.96.1.2 /* default/foo:http loadbalancer IP */ tcp dpt:8080
```
**4. Support NodePort type service**
To support NodePort-type services, the ipvs proxier reuses the existing implementation from the iptables proxier. For example,
```shell
# kubectl describe svc nginx-service
Name: nginx-service
...
Type: NodePort
IP: 10.101.28.148
Port: http 3080/TCP
NodePort: http 31604/TCP
Endpoints: 172.17.0.2:80
Session Affinity: None
# iptables -t nat -nL
[root@100-106-179-225 ~]# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-MARK-MASQ tcp -- !172.16.0.0/16 10.101.28.148 /* default/nginx-service:http cluster IP */ tcp dpt:3080
KUBE-SVC-6IM33IEVEEV7U3GP tcp -- 0.0.0.0/0 10.101.28.148 /* default/nginx-service:http cluster IP */ tcp dpt:3080
KUBE-NODEPORTS all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL
Chain KUBE-NODEPORTS (1 references)
target prot opt source destination
KUBE-MARK-MASQ tcp -- 0.0.0.0/0 0.0.0.0/0 /* default/nginx-service:http */ tcp dpt:31604
KUBE-SVC-6IM33IEVEEV7U3GP tcp -- 0.0.0.0/0 0.0.0.0/0 /* default/nginx-service:http */ tcp dpt:31604
Chain KUBE-SVC-6IM33IEVEEV7U3GP (2 references)
target prot opt source destination
KUBE-SEP-Q3UCPZ54E6Q2R4UT all -- 0.0.0.0/0 0.0.0.0/0 /* default/nginx-service:http */
Chain KUBE-SEP-Q3UCPZ54E6Q2R4UT (1 references)
target prot opt source destination
KUBE-MARK-MASQ all -- 172.17.0.2 0.0.0.0/0 /* default/nginx-service:http */
DNAT tcp -- 0.0.0.0/0 0.0.0.0/0 /* default/nginx-service:http */ tcp to:172.17.0.2:80
```
## Run kube-proxy in ipvs mode
Currently, local-up scripts, GCE scripts, and kubeadm support switching to IPVS proxy mode by exporting environment variables or specifying flags.
### Prerequisite
Ensure the following kernel modules required by IPVS
```shell
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
```
1. have been compiled into the node kernel. Use
`grep -e ipvs -e nf_conntrack_ipv4 /lib/modules/$(uname -r)/modules.builtin`
and you will get results like the following if they are compiled into the kernel.
```
kernel/net/ipv4/netfilter/nf_conntrack_ipv4.ko
kernel/net/netfilter/ipvs/ip_vs.ko
kernel/net/netfilter/ipvs/ip_vs_rr.ko
kernel/net/netfilter/ipvs/ip_vs_wrr.ko
kernel/net/netfilter/ipvs/ip_vs_lc.ko
kernel/net/netfilter/ipvs/ip_vs_wlc.ko
kernel/net/netfilter/ipvs/ip_vs_fo.ko
kernel/net/netfilter/ipvs/ip_vs_ovf.ko
kernel/net/netfilter/ipvs/ip_vs_lblc.ko
kernel/net/netfilter/ipvs/ip_vs_lblcr.ko
kernel/net/netfilter/ipvs/ip_vs_dh.ko
kernel/net/netfilter/ipvs/ip_vs_sh.ko
kernel/net/netfilter/ipvs/ip_vs_sed.ko
kernel/net/netfilter/ipvs/ip_vs_nq.ko
kernel/net/netfilter/ipvs/ip_vs_ftp.ko
```
OR
2. have been loaded.
```shell
# load module <module_name>
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
# to check loaded modules, use
lsmod | grep -e ipvs -e nf_conntrack_ipv4
# or
cut -f1 -d " " /proc/modules | grep -e ip_vs -e nf_conntrack_ipv4
```
Packages such as `ipset` should also be installed on the node before using IPVS mode.
Kube-proxy will fall back to IPTABLES mode if those requirements are not met.
### Local UP Cluster
Kube-proxy will run in iptables mode by default in a [local-up cluster](https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md).
To use IPVS mode, users should export the environment variable `KUBE_PROXY_MODE=ipvs` before [starting the cluster](https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md#starting-the-cluster):
```shell
# before running `hack/local-up-cluster.sh`
export KUBE_PROXY_MODE=ipvs
```
### GCE Cluster
Similar to a local-up cluster, kube-proxy in [clusters running on GCE](https://kubernetes.io/docs/getting-started-guides/gce/) runs in iptables mode by default. Users need to export the environment variable `KUBE_PROXY_MODE=ipvs` before [starting a cluster](https://kubernetes.io/docs/getting-started-guides/gce/#starting-a-cluster):
```shell
# before running one of the commands chosen to start a cluster:
# curl -sS https://get.k8s.io | bash
# wget -q -O - https://get.k8s.io | bash
# cluster/kube-up.sh
export KUBE_PROXY_MODE=ipvs
```
### Cluster Created by Kubeadm
Kube-proxy will run in iptables mode by default in a cluster deployed by [kubeadm](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/).
If you are using kubeadm with a [configuration file](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file), you can specify ipvs mode by adding the `SupportIPVSProxyMode` feature gate and `mode: ipvs` below the `kubeProxy` field.
```yaml
kind: MasterConfiguration
apiVersion: kubeadm.k8s.io/v1alpha1
...
kubeProxy:
config:
featureGates: SupportIPVSProxyMode=true
mode: ipvs
...
```
before running
`kubeadm init --config <path_to_configuration_file>`
If you are using Kubernetes v1.8, you can also add the flag `--feature-gates=SupportIPVSProxyMode=true` (deprecated since v1.9) to the `kubeadm init` command
```
kubeadm init --feature-gates=SupportIPVSProxyMode=true
```
to specify the ipvs mode before deploying the cluster.
**Notes**
If ipvs mode is successfully enabled, you should see IPVS proxy rules (use `ipvsadm`) like
```shell
# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.0.0.1:443 rr persistent 10800
-> 192.168.0.1:6443 Masq 1 1 0
```
or you should see similar logs in the kube-proxy log (for example, `/tmp/kube-proxy.log` for a local-up cluster) while the cluster is running:
```
Using ipvs Proxier.
```
If there are no IPVS proxy rules, or the following logs occur, kube-proxy has failed to use ipvs mode:
```
Can't use ipvs proxier, trying iptables proxier
Using iptables Proxier.
```
See the following section for more details on debugging.
## Debug
### Check IPVS proxy rules
Users can use the `ipvsadm` tool to check whether kube-proxy is maintaining IPVS rules correctly. For example, suppose we have the following services in the cluster:
```
# kubectl get svc --all-namespaces
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default kubernetes ClusterIP 10.0.0.1 <none> 443/TCP 1d
kube-system kube-dns ClusterIP 10.0.0.10 <none> 53/UDP,53/TCP 1d
```
We may get IPVS proxy rules like:
```shell
# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.0.0.1:443 rr persistent 10800
-> 192.168.0.1:6443 Masq 1 1 0
TCP 10.0.0.10:53 rr
-> 172.17.0.2:53 Masq 1 0 0
UDP 10.0.0.10:53 rr
-> 172.17.0.2:53 Masq 1 0 0
```
### Why kube-proxy can't start IPVS mode
Use the following checklist to help you troubleshoot:
**1. Enable the IPVS feature gate**
For Kubernetes v1.10 and later, feature gate `SupportIPVSProxyMode` is set to `true` by default. However, you need to enable `--feature-gates=SupportIPVSProxyMode=true` explicitly for Kubernetes before v1.10.
**2. Specify proxy-mode=ipvs**
Check whether the kube-proxy mode has been set to `ipvs`.
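A quick way to verify the active mode (a sketch, assuming kube-proxy serves its metrics on the default port 10249) is to query its `proxyMode` endpoint on the node:
```shell
# prints "ipvs" when the IPVS proxier is in use
curl localhost:10249/proxyMode
```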
**3. Install required kernel modules and packages**
Check whether the kernel modules required by IPVS have been compiled into the kernel (or loaded) and the required packages have been installed (see Prerequisite).

View File

@ -1,176 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipvs
import (
"k8s.io/apimachinery/pkg/util/sets"
utilipset "k8s.io/kubernetes/pkg/util/ipset"
utilversion "k8s.io/kubernetes/pkg/util/version"
"fmt"
"github.com/golang/glog"
)
const (
// MinIPSetCheckVersion is the min ipset version we need. IPv6 is supported in ipset 6.x
MinIPSetCheckVersion = "6.0"
kubeLoopBackIPSetComment = "Kubernetes endpoints dst ip:port, source ip for solving hairpin purpose"
kubeLoopBackIPSet = "KUBE-LOOP-BACK"
kubeClusterIPSetComment = "Kubernetes service cluster ip + port for masquerade purpose"
kubeClusterIPSet = "KUBE-CLUSTER-IP"
kubeExternalIPSetComment = "Kubernetes service external ip + port for masquerade and filter purpose"
kubeExternalIPSet = "KUBE-EXTERNAL-IP"
kubeLoadBalancerSetComment = "Kubernetes service lb portal"
kubeLoadBalancerSet = "KUBE-LOAD-BALANCER"
kubeLoadBalancerLocalSetComment = "Kubernetes service load balancer ip + port with externalTrafficPolicy=local"
kubeLoadBalancerLocalSet = "KUBE-LOAD-BALANCER-LOCAL"
kubeLoadbalancerFWSetComment = "Kubernetes service load balancer ip + port for load balancer with sourceRange"
kubeLoadbalancerFWSet = "KUBE-LOAD-BALANCER-FW"
kubeLoadBalancerSourceIPSetComment = "Kubernetes service load balancer ip + port + source IP for packet filter purpose"
kubeLoadBalancerSourceIPSet = "KUBE-LOAD-BALANCER-SOURCE-IP"
kubeLoadBalancerSourceCIDRSetComment = "Kubernetes service load balancer ip + port + source cidr for packet filter purpose"
kubeLoadBalancerSourceCIDRSet = "KUBE-LOAD-BALANCER-SOURCE-CIDR"
kubeNodePortSetTCPComment = "Kubernetes nodeport TCP port for masquerade purpose"
kubeNodePortSetTCP = "KUBE-NODE-PORT-TCP"
kubeNodePortLocalSetTCPComment = "Kubernetes nodeport TCP port with externalTrafficPolicy=local"
kubeNodePortLocalSetTCP = "KUBE-NODE-PORT-LOCAL-TCP"
kubeNodePortSetUDPComment = "Kubernetes nodeport UDP port for masquerade purpose"
kubeNodePortSetUDP = "KUBE-NODE-PORT-UDP"
kubeNodePortLocalSetUDPComment = "Kubernetes nodeport UDP port with externalTrafficPolicy=local"
kubeNodePortLocalSetUDP = "KUBE-NODE-PORT-LOCAL-UDP"
)
// IPSetVersioner can query the current ipset version.
type IPSetVersioner interface {
// returns "X.Y"
GetVersion() (string, error)
}
// IPSet wraps util/ipset which is used by IPVS proxier.
type IPSet struct {
utilipset.IPSet
// activeEntries is the current active entries of the ipset.
activeEntries sets.String
// handle is the util ipset interface handle.
handle utilipset.Interface
}
// NewIPSet initializes a new IPSet struct
func NewIPSet(handle utilipset.Interface, name string, setType utilipset.Type, isIPv6 bool, comment string) *IPSet {
hashFamily := utilipset.ProtocolFamilyIPV4
if isIPv6 {
hashFamily = utilipset.ProtocolFamilyIPV6
}
set := &IPSet{
IPSet: utilipset.IPSet{
Name: name,
SetType: setType,
HashFamily: hashFamily,
Comment: comment,
},
activeEntries: sets.NewString(),
handle: handle,
}
return set
}
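// A usage sketch (illustrative only; handle stands for any utilipset.Interface
// implementation): callers record the desired entries on the set and then
// reconcile them against what is installed in the kernel:
//
//	set := NewIPSet(handle, kubeClusterIPSet, utilipset.HashIPPort, false, kubeClusterIPSetComment)
//	set.activeEntries.Insert("10.254.0.1,tcp:443")
//	set.syncIPSetEntries()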
func (set *IPSet) validateEntry(entry *utilipset.Entry) bool {
return entry.Validate(&set.IPSet)
}
func (set *IPSet) isEmpty() bool {
return len(set.activeEntries.UnsortedList()) == 0
}
func (set *IPSet) getComment() string {
return fmt.Sprintf("\"%s\"", set.Comment)
}
func (set *IPSet) resetEntries() {
set.activeEntries = sets.NewString()
}
func (set *IPSet) syncIPSetEntries() {
appliedEntries, err := set.handle.ListEntries(set.Name)
if err != nil {
glog.Errorf("Failed to list ip set entries, error: %v", err)
return
}
// currentIPSetEntries represents Endpoints watched from API Server.
currentIPSetEntries := sets.NewString()
for _, appliedEntry := range appliedEntries {
currentIPSetEntries.Insert(appliedEntry)
}
if !set.activeEntries.Equal(currentIPSetEntries) {
// Clean legacy entries
for _, entry := range currentIPSetEntries.Difference(set.activeEntries).List() {
if err := set.handle.DelEntry(entry, set.Name); err != nil {
if !utilipset.IsNotFoundError(err) {
glog.Errorf("Failed to delete ip set entry: %s from ip set: %s, error: %v", entry, set.Name, err)
}
} else {
glog.V(3).Infof("Successfully delete legacy ip set entry: %s from ip set: %s", entry, set.Name)
}
}
// Create active entries
for _, entry := range set.activeEntries.Difference(currentIPSetEntries).List() {
if err := set.handle.AddEntry(entry, &set.IPSet, true); err != nil {
glog.Errorf("Failed to add entry: %v to ip set: %s, error: %v", entry, set.Name, err)
} else {
glog.V(3).Infof("Successfully add entry: %v to ip set: %s", entry, set.Name)
}
}
}
}
func ensureIPSet(set *IPSet) error {
if err := set.handle.CreateSet(&set.IPSet, true); err != nil {
glog.Errorf("Failed to make sure ip set: %v exist, error: %v", set, err)
return err
}
return nil
}
// checkMinVersion checks if the current ipset version satisfies the required minimum version
func checkMinVersion(vstring string) bool {
version, err := utilversion.ParseGeneric(vstring)
if err != nil {
glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err)
return false
}
minVersion, err := utilversion.ParseGeneric(MinIPSetCheckVersion)
if err != nil {
glog.Errorf("MinCheckVersion (%s) is not a valid version string: %v", MinIPSetCheckVersion, err)
return false
}
return !version.LessThan(minVersion)
}

View File

@ -1,206 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipvs
import (
"testing"
utilipset "k8s.io/kubernetes/pkg/util/ipset"
fakeipset "k8s.io/kubernetes/pkg/util/ipset/testing"
)
func TestCheckIPSetVersion(t *testing.T) {
testCases := []struct {
vstring string
valid bool
}{
// version less than "6.0" is not valid.
{"4.0", false},
{"5.1", false},
{"5.1.2", false},
// "7" is not a valid version string.
{"7", false},
{"6.0", true},
{"6.1", true},
{"6.19", true},
{"7.0", true},
{"8.1.2", true},
{"9.3.4.0", true},
{"total junk", false},
}
for i := range testCases {
valid := checkMinVersion(testCases[i].vstring)
if testCases[i].valid != valid {
t.Errorf("Expected result: %v, Got result: %v", testCases[i].valid, valid)
}
}
}
const testIPSetVersion = "v6.19"
func TestSyncIPSetEntries(t *testing.T) {
testCases := []struct {
set *utilipset.IPSet
setType utilipset.Type
ipv6 bool
activeEntries []string
currentEntries []string
expectedEntries []string
}{
{ // case 0
set: &utilipset.IPSet{
Name: "foo",
},
setType: utilipset.HashIPPort,
ipv6: false,
activeEntries: []string{"172.17.0.4,tcp:80"},
currentEntries: nil,
expectedEntries: []string{"172.17.0.4,tcp:80"},
},
{ // case 1
set: &utilipset.IPSet{
Name: "abz",
},
setType: utilipset.HashIPPort,
ipv6: true,
activeEntries: []string{"FE80::0202:B3FF:FE1E:8329,tcp:80"},
currentEntries: []string{"FE80::0202:B3FF:FE1E:8329,tcp:80"},
expectedEntries: []string{"FE80::0202:B3FF:FE1E:8329,tcp:80"},
},
{ // case 2
set: &utilipset.IPSet{
Name: "bca",
},
setType: utilipset.HashIPPort,
ipv6: false,
activeEntries: []string{"172.17.0.4,tcp:80", "172.17.0.5,tcp:80"},
currentEntries: []string{"172.17.0.5,udp:53"},
expectedEntries: []string{"172.17.0.4,tcp:80", "172.17.0.5,tcp:80"},
},
{ // case 3
set: &utilipset.IPSet{
Name: "bar",
},
setType: utilipset.HashIPPortIP,
ipv6: false,
activeEntries: []string{"172.17.0.4,tcp:80:172.17.0.4"},
currentEntries: []string{"172.17.0.4,tcp:80:172.17.0.4"},
expectedEntries: []string{"172.17.0.4,tcp:80:172.17.0.4"},
},
{ // case 4
set: &utilipset.IPSet{
Name: "baz",
},
setType: utilipset.HashIPPortIP,
ipv6: true,
activeEntries: []string{"FE80:0000:0000:0000:0202:B3FF:FE1E:8329,tcp:8080:FE80:0000:0000:0000:0202:B3FF:FE1E:8329"},
currentEntries: []string{"1111:0000:0000:0000:0202:B3FF:FE1E:8329,tcp:8081:1111:0000:0000:0000:0202:B3FF:FE1E:8329:8081"},
expectedEntries: []string{"FE80:0000:0000:0000:0202:B3FF:FE1E:8329,tcp:8080:FE80:0000:0000:0000:0202:B3FF:FE1E:8329"},
},
{ // case 5
set: &utilipset.IPSet{
Name: "NOPE",
},
setType: utilipset.HashIPPortIP,
ipv6: false,
activeEntries: []string{"172.17.0.4,tcp:80,172.17.0.9", "172.17.0.5,tcp:80,172.17.0.10"},
currentEntries: nil,
expectedEntries: []string{"172.17.0.4,tcp:80,172.17.0.9", "172.17.0.5,tcp:80,172.17.0.10"},
},
{ // case 6
set: &utilipset.IPSet{
Name: "ABC-DEF",
},
setType: utilipset.HashIPPortNet,
ipv6: false,
activeEntries: []string{"172.17.0.4,tcp:80,172.17.0.0/16", "172.17.0.5,tcp:80,172.17.0.0/16"},
currentEntries: nil,
expectedEntries: []string{"172.17.0.4,tcp:80,172.17.0.0/16", "172.17.0.5,tcp:80,172.17.0.0/16"},
},
{ // case 7
set: &utilipset.IPSet{
Name: "zar",
},
setType: utilipset.HashIPPortNet,
ipv6: true,
activeEntries: []string{"FE80::8329,tcp:8800,2001:db8::/32"},
currentEntries: []string{"FE80::8329,tcp:8800,2001:db8::/32"},
expectedEntries: []string{"FE80::8329,tcp:8800,2001:db8::/32"},
},
{ // case 8
set: &utilipset.IPSet{
Name: "bbb",
},
setType: utilipset.HashIPPortNet,
ipv6: true,
activeEntries: nil,
currentEntries: []string{"FE80::8329,udp:8801,2001:db8::/32"},
expectedEntries: nil,
},
{ // case 9
set: &utilipset.IPSet{
Name: "AAA",
},
setType: utilipset.BitmapPort,
activeEntries: nil,
currentEntries: []string{"80"},
expectedEntries: nil,
},
{ // case 10
set: &utilipset.IPSet{
Name: "c-c-c",
},
setType: utilipset.BitmapPort,
activeEntries: []string{"8080", "9090"},
currentEntries: []string{"80"},
expectedEntries: []string{"8080", "9090"},
},
{ // case 11
set: &utilipset.IPSet{
Name: "NODE-PORT",
},
setType: utilipset.BitmapPort,
activeEntries: []string{"8080"},
currentEntries: []string{"80", "9090", "8081", "8082"},
expectedEntries: []string{"8080"},
},
}
for i := range testCases {
set := NewIPSet(fakeipset.NewFake(testIPSetVersion), testCases[i].set.Name, testCases[i].setType, testCases[i].ipv6, "comment-"+testCases[i].set.Name)
if err := set.handle.CreateSet(&set.IPSet, true); err != nil {
t.Errorf("Unexpected error: %v", err)
}
for _, entry := range testCases[i].expectedEntries {
set.handle.AddEntry(entry, testCases[i].set, true)
}
set.activeEntries.Insert(testCases[i].activeEntries...)
set.syncIPSetEntries()
for _, entry := range testCases[i].expectedEntries {
found, err := set.handle.TestEntry(entry, testCases[i].set.Name)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if !found {
t.Errorf("Unexpected entry 172.17.0.4,tcp:80 not found in set foo")
}
}
}
}

View File

@ -1,36 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipvs
import (
"k8s.io/apimachinery/pkg/util/sets"
)
// NetLinkHandle wraps the netlink operations needed by the IPVS proxier
type NetLinkHandle interface {
// EnsureAddressBind checks if address is bound to the interface and, if not, binds it. If the address is already bound, return true.
EnsureAddressBind(address, devName string) (exist bool, err error)
// UnbindAddress unbinds the address from the interface
UnbindAddress(address, devName string) error
// EnsureDummyDevice checks if the dummy device exists and, if not, creates one. If the dummy device already exists, return true.
EnsureDummyDevice(devName string) (exist bool, err error)
// DeleteDummyDevice deletes the given dummy device by name.
DeleteDummyDevice(devName string) error
// GetLocalAddresses returns all unique local-type IP addresses on the given filter device interface. If no filter device is given,
// it lists all unique local-type addresses.
GetLocalAddresses(filterDev string) (sets.String, error)
}

View File

@ -1,162 +0,0 @@
// +build linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipvs
import (
"fmt"
"net"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
)
type netlinkHandle struct {
netlink.Handle
}
// NewNetLinkHandle will create a new NetLinkHandle
func NewNetLinkHandle() NetLinkHandle {
return &netlinkHandle{netlink.Handle{}}
}
// EnsureAddressBind checks if address is bound to the interface and, if not, binds it. If the address is already bound, return true.
func (h *netlinkHandle) EnsureAddressBind(address, devName string) (exist bool, err error) {
dev, err := h.LinkByName(devName)
if err != nil {
return false, fmt.Errorf("error get interface: %s, err: %v", devName, err)
}
addr := net.ParseIP(address)
if addr == nil {
return false, fmt.Errorf("error parse ip address: %s", address)
}
if err := h.AddrAdd(dev, &netlink.Addr{IPNet: netlink.NewIPNet(addr)}); err != nil {
// "EEXIST" will be returned if the address is already bound to device
if err == unix.EEXIST {
return true, nil
}
return false, fmt.Errorf("error bind address: %s to interface: %s, err: %v", address, devName, err)
}
return false, nil
}
// UnbindAddress makes sure IP address is unbound from the network interface.
func (h *netlinkHandle) UnbindAddress(address, devName string) error {
dev, err := h.LinkByName(devName)
if err != nil {
return fmt.Errorf("error get interface: %s, err: %v", devName, err)
}
addr := net.ParseIP(address)
if addr == nil {
return fmt.Errorf("error parse ip address: %s", address)
}
if err := h.AddrDel(dev, &netlink.Addr{IPNet: netlink.NewIPNet(addr)}); err != nil {
if err != unix.ENXIO {
return fmt.Errorf("error unbind address: %s from interface: %s, err: %v", address, devName, err)
}
}
return nil
}
// EnsureDummyDevice is part of interface
func (h *netlinkHandle) EnsureDummyDevice(devName string) (bool, error) {
_, err := h.LinkByName(devName)
if err == nil {
// found dummy device
return true, nil
}
dummy := &netlink.Dummy{
LinkAttrs: netlink.LinkAttrs{Name: devName},
}
return false, h.LinkAdd(dummy)
}
// DeleteDummyDevice is part of interface.
func (h *netlinkHandle) DeleteDummyDevice(devName string) error {
link, err := h.LinkByName(devName)
if err != nil {
_, ok := err.(netlink.LinkNotFoundError)
if ok {
return nil
}
return fmt.Errorf("error deleting a non-exist dummy device: %s, %v", devName, err)
}
dummy, ok := link.(*netlink.Dummy)
if !ok {
return fmt.Errorf("expect dummy device, got device type: %s", link.Type())
}
return h.LinkDel(dummy)
}
// GetLocalAddresses lists all LOCAL type IP addresses from host based on filter device.
// If filter device is not specified, it's equivalent to exec:
// $ ip route show table local type local proto kernel
// 10.0.0.1 dev kube-ipvs0 scope host src 10.0.0.1
// 10.0.0.10 dev kube-ipvs0 scope host src 10.0.0.10
// 10.0.0.252 dev kube-ipvs0 scope host src 10.0.0.252
// 100.106.89.164 dev eth0 scope host src 100.106.89.164
// 127.0.0.0/8 dev lo scope host src 127.0.0.1
// 127.0.0.1 dev lo scope host src 127.0.0.1
// 172.17.0.1 dev docker0 scope host src 172.17.0.1
// 192.168.122.1 dev virbr0 scope host src 192.168.122.1
// Then cut the unique src IP fields,
// --> result set: [10.0.0.1, 10.0.0.10, 10.0.0.252, 100.106.89.164, 127.0.0.1, 192.168.122.1]
// If filter device is specified, it's equivalent to exec:
// $ ip route show table local type local proto kernel dev kube-ipvs0
// 10.0.0.1 scope host src 10.0.0.1
// 10.0.0.10 scope host src 10.0.0.10
// Then cut the unique src IP fields,
// --> result set: [10.0.0.1, 10.0.0.10]
func (h *netlinkHandle) GetLocalAddresses(filterDev string) (sets.String, error) {
linkIndex := -1
if len(filterDev) != 0 {
link, err := h.LinkByName(filterDev)
if err != nil {
return nil, fmt.Errorf("error get filter device %s, err: %v", filterDev, err)
}
linkIndex = link.Attrs().Index
}
routeFilter := &netlink.Route{
Table: unix.RT_TABLE_LOCAL,
Type: unix.RTN_LOCAL,
Protocol: unix.RTPROT_KERNEL,
}
filterMask := netlink.RT_FILTER_TABLE | netlink.RT_FILTER_TYPE | netlink.RT_FILTER_PROTOCOL
// find filter device
if linkIndex != -1 {
routeFilter.LinkIndex = linkIndex
filterMask |= netlink.RT_FILTER_OIF
}
routes, err := h.RouteListFiltered(netlink.FAMILY_ALL, routeFilter, filterMask)
if err != nil {
return nil, fmt.Errorf("error list route table, err: %v", err)
}
res := sets.NewString()
for _, route := range routes {
if route.Src != nil {
res.Insert(route.Src.String())
}
}
return res, nil
}

View File

@ -1,58 +0,0 @@
// +build !linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipvs
import (
"fmt"
"k8s.io/apimachinery/pkg/util/sets"
)
type emptyHandle struct {
}
// NewNetLinkHandle will create an EmptyHandle
func NewNetLinkHandle() NetLinkHandle {
return &emptyHandle{}
}
// EnsureAddressBind checks if address is bound to the interface and, if not, binds it. If the address is already bound, return true.
func (h *emptyHandle) EnsureAddressBind(address, devName string) (exist bool, err error) {
return false, fmt.Errorf("netlink not supported for this platform")
}
// UnbindAddress unbinds the address from the interface
func (h *emptyHandle) UnbindAddress(address, devName string) error {
return fmt.Errorf("netlink not supported for this platform")
}
// EnsureDummyDevice is part of interface
func (h *emptyHandle) EnsureDummyDevice(devName string) (bool, error) {
return false, fmt.Errorf("netlink is not supported in this platform")
}
// DeleteDummyDevice is part of interface.
func (h *emptyHandle) DeleteDummyDevice(devName string) error {
return fmt.Errorf("netlink is not supported in this platform")
}
// GetLocalAddresses is part of interface.
func (h *emptyHandle) GetLocalAddresses(filterDev string) (sets.String, error) {
return nil, fmt.Errorf("netlink is not supported in this platform")
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,43 +0,0 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"fake.go",
"util.go",
],
importpath = "k8s.io/kubernetes/pkg/proxy/ipvs/testing",
tags = ["automanaged"],
deps = [
"//pkg/util/ipset:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["fake_test.go"],
embed = [":go_default_library"],
deps = ["//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library"],
)

View File

@ -1,91 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"fmt"
"k8s.io/apimachinery/pkg/util/sets"
)
// FakeNetlinkHandle mock implementation of proxy NetlinkHandle
type FakeNetlinkHandle struct {
// localAddresses maps a network interface name to all of its IP addresses, e.g.
// eth0 -> [1.2.3.4, 10.20.30.40]
localAddresses map[string][]string
}
// NewFakeNetlinkHandle will create a new FakeNetlinkHandle
func NewFakeNetlinkHandle() *FakeNetlinkHandle {
fake := &FakeNetlinkHandle{
localAddresses: make(map[string][]string),
}
return fake
}
// EnsureAddressBind is a mock implementation
func (h *FakeNetlinkHandle) EnsureAddressBind(address, devName string) (exist bool, err error) {
return false, nil
}
// UnbindAddress is a mock implementation
func (h *FakeNetlinkHandle) UnbindAddress(address, devName string) error {
return nil
}
// EnsureDummyDevice is a mock implementation
func (h *FakeNetlinkHandle) EnsureDummyDevice(devName string) (bool, error) {
return false, nil
}
// DeleteDummyDevice is a mock implementation
func (h *FakeNetlinkHandle) DeleteDummyDevice(devName string) error {
return nil
}
// GetLocalAddresses is a mock implementation
func (h *FakeNetlinkHandle) GetLocalAddresses(filterDev string) (sets.String, error) {
res := sets.NewString()
if len(filterDev) != 0 {
// list all addresses from a given network interface.
for _, addr := range h.localAddresses[filterDev] {
res.Insert(addr)
}
return res, nil
}
// If filterDev is not given, list all addresses from all available network interfaces.
for linkName := range h.localAddresses {
// list all addresses from a given network interface.
for _, addr := range h.localAddresses[linkName] {
res.Insert(addr)
}
}
return res, nil
}
// SetLocalAddresses sets IP addresses on the given interface device. It's not part of the NetLinkHandle interface.
func (h *FakeNetlinkHandle) SetLocalAddresses(dev string, ips ...string) error {
if h.localAddresses == nil {
h.localAddresses = make(map[string][]string)
}
if len(dev) == 0 {
return fmt.Errorf("device name can't be empty")
}
h.localAddresses[dev] = make([]string, 0)
h.localAddresses[dev] = append(h.localAddresses[dev], ips...)
return nil
}

View File

@ -1,49 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"reflect"
"testing"
"k8s.io/apimachinery/pkg/util/sets"
)
func TestSetGetLocalAddresses(t *testing.T) {
fake := NewFakeNetlinkHandle()
fake.SetLocalAddresses("eth0", "1.2.3.4")
expected := sets.NewString("1.2.3.4")
addr, _ := fake.GetLocalAddresses("eth0")
if !reflect.DeepEqual(expected, addr) {
t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, addr)
}
list, _ := fake.GetLocalAddresses("")
if !reflect.DeepEqual(expected, list) {
t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, list)
}
fake.SetLocalAddresses("lo", "127.0.0.1")
expected = sets.NewString("127.0.0.1")
addr, _ = fake.GetLocalAddresses("lo")
if !reflect.DeepEqual(expected, addr) {
t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, addr)
}
list, _ = fake.GetLocalAddresses("")
expected = sets.NewString("1.2.3.4", "127.0.0.1")
if !reflect.DeepEqual(expected, list) {
t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, list)
}
}

View File

@ -1,51 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
utilipset "k8s.io/kubernetes/pkg/util/ipset"
)
// ExpectedVirtualServer is the expected ipvs rules with VirtualServer and RealServer
// VSNum is the expected ipvs virtual server number
// IP:Port protocol is the expected ipvs vs info
// RS is the RealServer of this expected VirtualServer
type ExpectedVirtualServer struct {
VSNum int
IP string
Port uint16
Protocol string
RS []ExpectedRealServer
}
// ExpectedRealServer is the expected ipvs RealServer
type ExpectedRealServer struct {
IP string
Port uint16
}
// ExpectedIptablesChain is a map of expected iptables chain and jump rules
type ExpectedIptablesChain map[string][]ExpectedIptablesRule
// ExpectedIptablesRule is the expected iptables rules with jump chain and match ipset name
type ExpectedIptablesRule struct {
JumpChain string
MatchSet string
}
// ExpectedIPSet is the expected ipset with set name and entries name
type ExpectedIPSet map[string][]*utilipset.Entry

View File

@ -1,23 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["metrics.go"],
importpath = "k8s.io/kubernetes/pkg/proxy/metrics",
visibility = ["//visibility:public"],
deps = ["//vendor/github.com/prometheus/client_golang/prometheus:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,52 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
)
const kubeProxySubsystem = "kubeproxy"
var (
// SyncProxyRulesLatency is the latency of one round of kube-proxy syncing proxy rules.
SyncProxyRulesLatency = prometheus.NewHistogram(
prometheus.HistogramOpts{
Subsystem: kubeProxySubsystem,
Name: "sync_proxy_rules_latency_microseconds",
Help: "SyncProxyRules latency",
Buckets: prometheus.ExponentialBuckets(1000, 2, 15),
},
)
)
var registerMetricsOnce sync.Once
// RegisterMetrics registers sync proxy rules latency metrics
func RegisterMetrics() {
registerMetricsOnce.Do(func() {
prometheus.MustRegister(SyncProxyRulesLatency)
})
}
// SinceInMicroseconds gets the time since the specified start in microseconds.
func SinceInMicroseconds(start time.Time) float64 {
return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
}
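// A usage sketch (illustrative only, from a caller's perspective): a proxier
// registers the metrics once and observes the latency of each sync round:
//
//	metrics.RegisterMetrics()
//	start := time.Now()
//	// ... sync proxy rules ...
//	metrics.SyncProxyRulesLatency.Observe(metrics.SinceInMicroseconds(start))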

View File

@ -1,343 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"fmt"
"net"
"reflect"
"strings"
"sync"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
apiservice "k8s.io/kubernetes/pkg/api/service"
api "k8s.io/kubernetes/pkg/apis/core"
utilproxy "k8s.io/kubernetes/pkg/proxy/util"
utilnet "k8s.io/kubernetes/pkg/util/net"
)
// BaseServiceInfo contains base information that defines a service.
// This could be used directly by proxier while processing services,
// or can be used for constructing a more specific ServiceInfo struct
// defined by the proxier if needed.
type BaseServiceInfo struct {
ClusterIP net.IP
Port int
Protocol api.Protocol
NodePort int
LoadBalancerStatus api.LoadBalancerStatus
SessionAffinityType api.ServiceAffinity
StickyMaxAgeSeconds int
ExternalIPs []string
LoadBalancerSourceRanges []string
HealthCheckNodePort int
OnlyNodeLocalEndpoints bool
}
var _ ServicePort = &BaseServiceInfo{}
// String is part of ServicePort interface.
func (info *BaseServiceInfo) String() string {
return fmt.Sprintf("%s:%d/%s", info.ClusterIP, info.Port, info.Protocol)
}
// ClusterIPString is part of ServicePort interface.
func (info *BaseServiceInfo) ClusterIPString() string {
return info.ClusterIP.String()
}
// GetProtocol is part of ServicePort interface.
func (info *BaseServiceInfo) GetProtocol() api.Protocol {
return info.Protocol
}
// GetHealthCheckNodePort is part of ServicePort interface.
func (info *BaseServiceInfo) GetHealthCheckNodePort() int {
return info.HealthCheckNodePort
}
func (sct *ServiceChangeTracker) newBaseServiceInfo(port *api.ServicePort, service *api.Service) *BaseServiceInfo {
onlyNodeLocalEndpoints := false
if apiservice.RequestsOnlyLocalTraffic(service) {
onlyNodeLocalEndpoints = true
}
var stickyMaxAgeSeconds int
if service.Spec.SessionAffinity == api.ServiceAffinityClientIP {
// Kube-apiserver side guarantees SessionAffinityConfig won't be nil when session affinity type is ClientIP
stickyMaxAgeSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
}
info := &BaseServiceInfo{
ClusterIP: net.ParseIP(service.Spec.ClusterIP),
Port: int(port.Port),
Protocol: port.Protocol,
NodePort: int(port.NodePort),
// Deep-copy in case the service instance changes
LoadBalancerStatus: *service.Status.LoadBalancer.DeepCopy(),
SessionAffinityType: service.Spec.SessionAffinity,
StickyMaxAgeSeconds: stickyMaxAgeSeconds,
OnlyNodeLocalEndpoints: onlyNodeLocalEndpoints,
}
if sct.isIPv6Mode == nil {
info.ExternalIPs = make([]string, len(service.Spec.ExternalIPs))
info.LoadBalancerSourceRanges = make([]string, len(service.Spec.LoadBalancerSourceRanges))
copy(info.LoadBalancerSourceRanges, service.Spec.LoadBalancerSourceRanges)
copy(info.ExternalIPs, service.Spec.ExternalIPs)
} else {
// Filter out the incorrect IP version case.
// If ExternalIPs and LoadBalancerSourceRanges on service contains incorrect IP versions,
// only filter out the incorrect ones.
var incorrectIPs []string
info.ExternalIPs, incorrectIPs = utilnet.FilterIncorrectIPVersion(service.Spec.ExternalIPs, *sct.isIPv6Mode)
if len(incorrectIPs) > 0 {
utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "externalIPs", strings.Join(incorrectIPs, ","), service.Namespace, service.Name, service.UID)
}
info.LoadBalancerSourceRanges, incorrectIPs = utilnet.FilterIncorrectCIDRVersion(service.Spec.LoadBalancerSourceRanges, *sct.isIPv6Mode)
if len(incorrectIPs) > 0 {
utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "loadBalancerSourceRanges", strings.Join(incorrectIPs, ","), service.Namespace, service.Name, service.UID)
}
}
if apiservice.NeedsHealthCheck(service) {
p := service.Spec.HealthCheckNodePort
if p == 0 {
glog.Errorf("Service %s/%s has no healthcheck nodeport", service.Namespace, service.Name)
} else {
info.HealthCheckNodePort = int(p)
}
}
return info
}
type makeServicePortFunc func(*api.ServicePort, *api.Service, *BaseServiceInfo) ServicePort
// serviceChange contains all changes to services that happened since proxy rules were synced. For a single object,
// changes are accumulated, i.e. previous is state from before applying the changes,
// current is state after applying all of the changes.
type serviceChange struct {
previous ServiceMap
current ServiceMap
}
// ServiceChangeTracker carries state about uncommitted changes to an arbitrary number of
// Services, keyed by their namespace and name.
type ServiceChangeTracker struct {
// lock protects items.
lock sync.Mutex
// items maps a service to its serviceChange.
items map[types.NamespacedName]*serviceChange
// makeServiceInfo allows proxier to inject customized information when processing service.
makeServiceInfo makeServicePortFunc
// isIPv6Mode indicates if change tracker is under IPv6/IPv4 mode. Nil means not applicable.
isIPv6Mode *bool
recorder record.EventRecorder
}
// NewServiceChangeTracker initializes a ServiceChangeTracker
func NewServiceChangeTracker(makeServiceInfo makeServicePortFunc, isIPv6Mode *bool, recorder record.EventRecorder) *ServiceChangeTracker {
return &ServiceChangeTracker{
items: make(map[types.NamespacedName]*serviceChange),
makeServiceInfo: makeServiceInfo,
isIPv6Mode: isIPv6Mode,
recorder: recorder,
}
}
// Update updates the given service's change map based on the <previous, current> service pair. It returns true if items changed,
// otherwise returns false. Update can be used to add/update/delete items of the change tracker. For example,
// Add item
// - pass <nil, service> as the <previous, current> pair.
// Update item
// - pass <oldService, service> as the <previous, current> pair.
// Delete item
// - pass <service, nil> as the <previous, current> pair.
func (sct *ServiceChangeTracker) Update(previous, current *api.Service) bool {
svc := current
if svc == nil {
svc = previous
}
// previous == nil && current == nil is unexpected, we should return false directly.
if svc == nil {
return false
}
namespacedName := types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name}
sct.lock.Lock()
defer sct.lock.Unlock()
change, exists := sct.items[namespacedName]
if !exists {
change = &serviceChange{}
change.previous = sct.serviceToServiceMap(previous)
sct.items[namespacedName] = change
}
change.current = sct.serviceToServiceMap(current)
// if change.previous equals change.current, it means no change
if reflect.DeepEqual(change.previous, change.current) {
delete(sct.items, namespacedName)
}
return len(sct.items) > 0
}
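// A usage sketch (illustrative only; oldSvc and newSvc stand for services
// delivered by an informer, and are not defined in this file):
//
//	sct := NewServiceChangeTracker(nil, nil, nil)
//	sct.Update(nil, newSvc)    // add
//	sct.Update(oldSvc, newSvc) // update
//	sct.Update(oldSvc, nil)    // delete
//	result := UpdateServiceMap(make(ServiceMap), sct)
//	_ = result.HCServiceNodePorts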
// UpdateServiceMapResult is the updated results after applying service changes.
type UpdateServiceMapResult struct {
// HCServiceNodePorts is a map of Service names to node port numbers which indicate the health of that Service on this Node.
// The value(uint16) of HCServices map is the service health check node port.
HCServiceNodePorts map[types.NamespacedName]uint16
// UDPStaleClusterIP holds stale (no longer assigned to a Service) Service IPs that had UDP ports.
// Callers can use this to abort timeout-waits or clear connection-tracking information.
UDPStaleClusterIP sets.String
}
// UpdateServiceMap updates ServiceMap based on the given changes.
func UpdateServiceMap(serviceMap ServiceMap, changes *ServiceChangeTracker) (result UpdateServiceMapResult) {
result.UDPStaleClusterIP = sets.NewString()
serviceMap.apply(changes, result.UDPStaleClusterIP)
// TODO: If this will appear to be computationally expensive, consider
// computing this incrementally similarly to serviceMap.
result.HCServiceNodePorts = make(map[types.NamespacedName]uint16)
for svcPortName, info := range serviceMap {
if info.GetHealthCheckNodePort() != 0 {
result.HCServiceNodePorts[svcPortName.NamespacedName] = uint16(info.GetHealthCheckNodePort())
}
}
return result
}
// ServiceMap maps a service to its ServicePort.
type ServiceMap map[ServicePortName]ServicePort
// serviceToServiceMap translates a single Service object to a ServiceMap.
//
// NOTE: service object should NOT be modified.
func (sct *ServiceChangeTracker) serviceToServiceMap(service *api.Service) ServiceMap {
if service == nil {
return nil
}
svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if utilproxy.ShouldSkipService(svcName, service) {
return nil
}
if len(service.Spec.ClusterIP) != 0 {
// Filter out the incorrect IP version case.
// If ClusterIP on service has incorrect IP version, service itself will be ignored.
if sct.isIPv6Mode != nil && utilnet.IsIPv6String(service.Spec.ClusterIP) != *sct.isIPv6Mode {
utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "clusterIP", service.Spec.ClusterIP, service.Namespace, service.Name, service.UID)
return nil
}
}
serviceMap := make(ServiceMap)
for i := range service.Spec.Ports {
servicePort := &service.Spec.Ports[i]
svcPortName := ServicePortName{NamespacedName: svcName, Port: servicePort.Name}
baseSvcInfo := sct.newBaseServiceInfo(servicePort, service)
if sct.makeServiceInfo != nil {
serviceMap[svcPortName] = sct.makeServiceInfo(servicePort, service, baseSvcInfo)
} else {
serviceMap[svcPortName] = baseSvcInfo
}
}
return serviceMap
}
// apply the changes to ServiceMap and update the stale udp cluster IP set. The UDPStaleClusterIP argument is passed in to store the
// udp protocol service cluster ip when service is deleted from the ServiceMap.
func (serviceMap *ServiceMap) apply(changes *ServiceChangeTracker, UDPStaleClusterIP sets.String) {
changes.lock.Lock()
defer changes.lock.Unlock()
for _, change := range changes.items {
serviceMap.merge(change.current)
// filter out the Update events of current changes from previous changes before calling unmerge() so that we can
// skip deleting the Update events.
change.previous.filter(change.current)
serviceMap.unmerge(change.previous, UDPStaleClusterIP)
}
// clear changes after applying them to ServiceMap.
changes.items = make(map[types.NamespacedName]*serviceChange)
return
}
// merge adds other ServiceMap's elements to current ServiceMap.
// On collision, other ALWAYS wins; otherwise the element from other is added to current.
// In other words, if an element in current collides with one in other, current is updated with the element from other.
// It returns a string type set which stores all the newly merged services' identifier, ServicePortName.String(), to help users
// tell if a service is deleted or updated.
// The returned value is one of the arguments of ServiceMap.unmerge().
// Merging ServiceMap B into ServiceMap A does the following 2 things:
// * update ServiceMap A.
// * produce a string set which stores all other ServiceMap's ServicePortName.String().
// For example,
// - A{}
// - B{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}}
// - A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}}
// - produce string set {"ns/cluster-ip:http"}
// - A{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 345, "UDP"}}
// - B{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}}
// - A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}}
// - produce string set {"ns/cluster-ip:http"}
func (sm *ServiceMap) merge(other ServiceMap) sets.String {
// existingPorts is going to store all identifiers of all services in `other` ServiceMap.
existingPorts := sets.NewString()
for svcPortName, info := range other {
// Take ServicePortName.String() as the newly merged service's identifier and put it into existingPorts.
existingPorts.Insert(svcPortName.String())
_, exists := (*sm)[svcPortName]
if !exists {
glog.V(1).Infof("Adding new service port %q at %s", svcPortName, info.String())
} else {
glog.V(1).Infof("Updating existing service port %q at %s", svcPortName, info.String())
}
(*sm)[svcPortName] = info
}
return existingPorts
}
// filter removes from the ServiceMap any element whose ServicePortName also appears in the other ServiceMap.
func (sm *ServiceMap) filter(other ServiceMap) {
for svcPortName := range *sm {
		// Skip deleting ports that are present in the other map (i.e. Update events).
if _, ok := other[svcPortName]; ok {
delete(*sm, svcPortName)
}
}
}
// unmerge deletes all of the other ServiceMap's elements from the current ServiceMap. The UDPStaleClusterIP string set
// is passed in to collect the cluster IPs of stale udp services; stale udp connections are cleared later based on it.
func (sm *ServiceMap) unmerge(other ServiceMap, UDPStaleClusterIP sets.String) {
for svcPortName := range other {
info, exists := (*sm)[svcPortName]
if exists {
glog.V(1).Infof("Removing service port %q", svcPortName)
if info.GetProtocol() == api.ProtocolUDP {
UDPStaleClusterIP.Insert(info.ClusterIPString())
}
delete(*sm, svcPortName)
} else {
glog.Errorf("Service port %q doesn't exists", svcPortName)
}
}
}
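// Example (illustrative sketch, not in the original source): how apply()
// composes merge, filter and unmerge for a single tracked change. The
// snapshot variables below are hypothetical names for one update event.
//
//	previous := change.previous // ServiceMap built from the old Service
//	current := change.current   // ServiceMap built from the new Service
//	staleUDP := sets.NewString()
//
//	sm.merge(current)              // add or overwrite every port in current
//	previous.filter(current)       // drop ports present in both (pure updates)
//	sm.unmerge(previous, staleUDP) // delete what truly went away, recording
//	                               // the cluster IPs of removed UDP ports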


@ -1,649 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"net"
"testing"
"github.com/davecgh/go-spew/spew"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
api "k8s.io/kubernetes/pkg/apis/core"
)
const testHostname = "test-hostname"
func makeTestServiceInfo(clusterIP string, port int, protocol string, healthcheckNodePort int, svcInfoFuncs ...func(*BaseServiceInfo)) *BaseServiceInfo {
info := &BaseServiceInfo{
ClusterIP: net.ParseIP(clusterIP),
Port: port,
Protocol: api.Protocol(protocol),
}
if healthcheckNodePort != 0 {
info.HealthCheckNodePort = healthcheckNodePort
}
for _, svcInfoFunc := range svcInfoFuncs {
svcInfoFunc(info)
}
return info
}
func makeTestService(namespace, name string, svcFunc func(*api.Service)) *api.Service {
svc := &api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{},
},
Spec: api.ServiceSpec{},
Status: api.ServiceStatus{},
}
svcFunc(svc)
return svc
}
func addTestPort(array []api.ServicePort, name string, protocol api.Protocol, port, nodeport int32, targetPort int) []api.ServicePort {
svcPort := api.ServicePort{
Name: name,
Protocol: protocol,
Port: port,
NodePort: nodeport,
TargetPort: intstr.FromInt(targetPort),
}
return append(array, svcPort)
}
func makeNSN(namespace, name string) types.NamespacedName {
return types.NamespacedName{Namespace: namespace, Name: name}
}
func makeServicePortName(ns, name, port string) ServicePortName {
return ServicePortName{
NamespacedName: makeNSN(ns, name),
Port: port,
}
}
func TestServiceToServiceMap(t *testing.T) {
svcTracker := NewServiceChangeTracker(nil, nil, nil)
trueVal := true
falseVal := false
testClusterIPv4 := "10.0.0.1"
testExternalIPv4 := "8.8.8.8"
testSourceRangeIPv4 := "0.0.0.0/1"
testClusterIPv6 := "2001:db8:85a3:0:0:8a2e:370:7334"
testExternalIPv6 := "2001:db8:85a3:0:0:8a2e:370:7335"
testSourceRangeIPv6 := "2001:db8::/32"
testCases := []struct {
desc string
service *api.Service
expected map[ServicePortName]*BaseServiceInfo
isIPv6Mode *bool
}{
{
desc: "nothing",
service: nil,
expected: map[ServicePortName]*BaseServiceInfo{},
},
{
desc: "headless service",
service: makeTestService("ns2", "headless", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeClusterIP
svc.Spec.ClusterIP = api.ClusterIPNone
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "rpc", "UDP", 1234, 0, 0)
}),
expected: map[ServicePortName]*BaseServiceInfo{},
},
{
desc: "headless service without port",
service: makeTestService("ns2", "headless-without-port", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeClusterIP
svc.Spec.ClusterIP = api.ClusterIPNone
}),
expected: map[ServicePortName]*BaseServiceInfo{},
},
{
desc: "cluster ip service",
service: makeTestService("ns2", "cluster-ip", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeClusterIP
svc.Spec.ClusterIP = "172.16.55.4"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p1", "UDP", 1234, 4321, 0)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p2", "UDP", 1235, 5321, 0)
}),
expected: map[ServicePortName]*BaseServiceInfo{
makeServicePortName("ns2", "cluster-ip", "p1"): makeTestServiceInfo("172.16.55.4", 1234, "UDP", 0),
makeServicePortName("ns2", "cluster-ip", "p2"): makeTestServiceInfo("172.16.55.4", 1235, "UDP", 0),
},
},
{
desc: "nodeport service",
service: makeTestService("ns2", "node-port", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeNodePort
svc.Spec.ClusterIP = "172.16.55.10"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port1", "UDP", 345, 678, 0)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port2", "TCP", 344, 677, 0)
}),
expected: map[ServicePortName]*BaseServiceInfo{
makeServicePortName("ns2", "node-port", "port1"): makeTestServiceInfo("172.16.55.10", 345, "UDP", 0),
makeServicePortName("ns2", "node-port", "port2"): makeTestServiceInfo("172.16.55.10", 344, "TCP", 0),
},
},
{
desc: "load balancer service",
service: makeTestService("ns1", "load-balancer", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeLoadBalancer
svc.Spec.ClusterIP = "172.16.55.11"
svc.Spec.LoadBalancerIP = "5.6.7.8"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port3", "UDP", 8675, 30061, 7000)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port4", "UDP", 8676, 30062, 7001)
svc.Status.LoadBalancer = api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
{IP: "10.1.2.4"},
},
}
}),
expected: map[ServicePortName]*BaseServiceInfo{
makeServicePortName("ns1", "load-balancer", "port3"): makeTestServiceInfo("172.16.55.11", 8675, "UDP", 0),
makeServicePortName("ns1", "load-balancer", "port4"): makeTestServiceInfo("172.16.55.11", 8676, "UDP", 0),
},
},
{
desc: "load balancer service with only local traffic policy",
service: makeTestService("ns1", "only-local-load-balancer", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeLoadBalancer
svc.Spec.ClusterIP = "172.16.55.12"
svc.Spec.LoadBalancerIP = "5.6.7.8"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "portx", "UDP", 8677, 30063, 7002)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "porty", "UDP", 8678, 30064, 7003)
svc.Status.LoadBalancer = api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
{IP: "10.1.2.3"},
},
}
svc.Spec.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyTypeLocal
svc.Spec.HealthCheckNodePort = 345
}),
expected: map[ServicePortName]*BaseServiceInfo{
makeServicePortName("ns1", "only-local-load-balancer", "portx"): makeTestServiceInfo("172.16.55.12", 8677, "UDP", 345),
makeServicePortName("ns1", "only-local-load-balancer", "porty"): makeTestServiceInfo("172.16.55.12", 8678, "UDP", 345),
},
},
{
desc: "external name service",
service: makeTestService("ns2", "external-name", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeExternalName
svc.Spec.ClusterIP = "172.16.55.4" // Should be ignored
svc.Spec.ExternalName = "foo2.bar.com"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "portz", "UDP", 1235, 5321, 0)
}),
expected: map[ServicePortName]*BaseServiceInfo{},
},
{
desc: "service with ipv6 clusterIP under ipv4 mode, service should be filtered",
service: &api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "invalidIPv6InIPV4Mode",
Namespace: "test",
},
Spec: api.ServiceSpec{
ClusterIP: testClusterIPv6,
Ports: []api.ServicePort{
{
Name: "testPort",
Port: int32(12345),
Protocol: api.ProtocolTCP,
},
},
},
},
isIPv6Mode: &falseVal,
},
{
desc: "service with ipv4 clusterIP under ipv6 mode, service should be filtered",
service: &api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "invalidIPv4InIPV6Mode",
Namespace: "test",
},
Spec: api.ServiceSpec{
ClusterIP: testClusterIPv4,
Ports: []api.ServicePort{
{
Name: "testPort",
Port: int32(12345),
Protocol: api.ProtocolTCP,
},
},
},
},
isIPv6Mode: &trueVal,
},
{
desc: "service with ipv4 configurations under ipv4 mode",
service: &api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "validIPv4",
Namespace: "test",
},
Spec: api.ServiceSpec{
ClusterIP: testClusterIPv4,
ExternalIPs: []string{testExternalIPv4},
LoadBalancerSourceRanges: []string{testSourceRangeIPv4},
Ports: []api.ServicePort{
{
Name: "testPort",
Port: int32(12345),
Protocol: api.ProtocolTCP,
},
},
},
},
expected: map[ServicePortName]*BaseServiceInfo{
makeServicePortName("test", "validIPv4", "testPort"): makeTestServiceInfo(testClusterIPv4, 12345, "TCP", 0, func(info *BaseServiceInfo) {
info.ExternalIPs = []string{testExternalIPv4}
info.LoadBalancerSourceRanges = []string{testSourceRangeIPv4}
}),
},
isIPv6Mode: &falseVal,
},
{
desc: "service with ipv6 configurations under ipv6 mode",
service: &api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "validIPv6",
Namespace: "test",
},
Spec: api.ServiceSpec{
ClusterIP: testClusterIPv6,
ExternalIPs: []string{testExternalIPv6},
LoadBalancerSourceRanges: []string{testSourceRangeIPv6},
Ports: []api.ServicePort{
{
Name: "testPort",
Port: int32(12345),
Protocol: api.ProtocolTCP,
},
},
},
},
expected: map[ServicePortName]*BaseServiceInfo{
makeServicePortName("test", "validIPv6", "testPort"): makeTestServiceInfo(testClusterIPv6, 12345, "TCP", 0, func(info *BaseServiceInfo) {
info.ExternalIPs = []string{testExternalIPv6}
info.LoadBalancerSourceRanges = []string{testSourceRangeIPv6}
}),
},
isIPv6Mode: &trueVal,
},
{
desc: "service with both ipv4 and ipv6 configurations under ipv4 mode, ipv6 fields should be filtered",
service: &api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "filterIPv6InIPV4Mode",
Namespace: "test",
},
Spec: api.ServiceSpec{
ClusterIP: testClusterIPv4,
ExternalIPs: []string{testExternalIPv4, testExternalIPv6},
LoadBalancerSourceRanges: []string{testSourceRangeIPv4, testSourceRangeIPv6},
Ports: []api.ServicePort{
{
Name: "testPort",
Port: int32(12345),
Protocol: api.ProtocolTCP,
},
},
},
},
expected: map[ServicePortName]*BaseServiceInfo{
makeServicePortName("test", "filterIPv6InIPV4Mode", "testPort"): makeTestServiceInfo(testClusterIPv4, 12345, "TCP", 0, func(info *BaseServiceInfo) {
info.ExternalIPs = []string{testExternalIPv4}
info.LoadBalancerSourceRanges = []string{testSourceRangeIPv4}
}),
},
isIPv6Mode: &falseVal,
},
{
desc: "service with both ipv4 and ipv6 configurations under ipv6 mode, ipv4 fields should be filtered",
service: &api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "filterIPv4InIPV6Mode",
Namespace: "test",
},
Spec: api.ServiceSpec{
ClusterIP: testClusterIPv6,
ExternalIPs: []string{testExternalIPv4, testExternalIPv6},
LoadBalancerSourceRanges: []string{testSourceRangeIPv4, testSourceRangeIPv6},
Ports: []api.ServicePort{
{
Name: "testPort",
Port: int32(12345),
Protocol: api.ProtocolTCP,
},
},
},
},
expected: map[ServicePortName]*BaseServiceInfo{
makeServicePortName("test", "filterIPv4InIPV6Mode", "testPort"): makeTestServiceInfo(testClusterIPv6, 12345, "TCP", 0, func(info *BaseServiceInfo) {
info.ExternalIPs = []string{testExternalIPv6}
info.LoadBalancerSourceRanges = []string{testSourceRangeIPv6}
}),
},
isIPv6Mode: &trueVal,
},
}
for _, tc := range testCases {
svcTracker.isIPv6Mode = tc.isIPv6Mode
// outputs
newServices := svcTracker.serviceToServiceMap(tc.service)
if len(newServices) != len(tc.expected) {
t.Errorf("[%s] expected %d new, got %d: %v", tc.desc, len(tc.expected), len(newServices), spew.Sdump(newServices))
}
for svcKey, expectedInfo := range tc.expected {
svcInfo := newServices[svcKey].(*BaseServiceInfo)
if !svcInfo.ClusterIP.Equal(expectedInfo.ClusterIP) ||
svcInfo.Port != expectedInfo.Port ||
svcInfo.Protocol != expectedInfo.Protocol ||
svcInfo.HealthCheckNodePort != expectedInfo.HealthCheckNodePort ||
!sets.NewString(svcInfo.ExternalIPs...).Equal(sets.NewString(expectedInfo.ExternalIPs...)) ||
!sets.NewString(svcInfo.LoadBalancerSourceRanges...).Equal(sets.NewString(expectedInfo.LoadBalancerSourceRanges...)) {
t.Errorf("[%s] expected new[%v]to be %v, got %v", tc.desc, svcKey, expectedInfo, *svcInfo)
}
}
}
}
type FakeProxier struct {
endpointsChanges *EndpointChangeTracker
serviceChanges *ServiceChangeTracker
serviceMap ServiceMap
endpointsMap EndpointsMap
hostname string
}
func newFakeProxier() *FakeProxier {
return &FakeProxier{
serviceMap: make(ServiceMap),
serviceChanges: NewServiceChangeTracker(nil, nil, nil),
endpointsMap: make(EndpointsMap),
endpointsChanges: NewEndpointChangeTracker(testHostname, nil, nil, nil),
}
}
func makeServiceMap(fake *FakeProxier, allServices ...*api.Service) {
for i := range allServices {
fake.addService(allServices[i])
}
}
func (fake *FakeProxier) addService(service *api.Service) {
fake.serviceChanges.Update(nil, service)
}
func (fake *FakeProxier) updateService(oldService *api.Service, service *api.Service) {
fake.serviceChanges.Update(oldService, service)
}
func (fake *FakeProxier) deleteService(service *api.Service) {
fake.serviceChanges.Update(service, nil)
}
func TestUpdateServiceMapHeadless(t *testing.T) {
fp := newFakeProxier()
makeServiceMap(fp,
makeTestService("ns2", "headless", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeClusterIP
svc.Spec.ClusterIP = api.ClusterIPNone
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "rpc", "UDP", 1234, 0, 0)
}),
makeTestService("ns2", "headless-without-port", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeClusterIP
svc.Spec.ClusterIP = api.ClusterIPNone
}),
)
// Headless service should be ignored
result := UpdateServiceMap(fp.serviceMap, fp.serviceChanges)
if len(fp.serviceMap) != 0 {
t.Errorf("expected service map length 0, got %d", len(fp.serviceMap))
}
// No proxied services, so no healthchecks
if len(result.HCServiceNodePorts) != 0 {
t.Errorf("expected healthcheck ports length 0, got %d", len(result.HCServiceNodePorts))
}
if len(result.UDPStaleClusterIP) != 0 {
t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
}
}
func TestUpdateServiceTypeExternalName(t *testing.T) {
fp := newFakeProxier()
makeServiceMap(fp,
makeTestService("ns2", "external-name", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeExternalName
svc.Spec.ClusterIP = "172.16.55.4" // Should be ignored
svc.Spec.ExternalName = "foo2.bar.com"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blah", "UDP", 1235, 5321, 0)
}),
)
result := UpdateServiceMap(fp.serviceMap, fp.serviceChanges)
if len(fp.serviceMap) != 0 {
t.Errorf("expected service map length 0, got %v", fp.serviceMap)
}
// No proxied services, so no healthchecks
if len(result.HCServiceNodePorts) != 0 {
t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
}
if len(result.UDPStaleClusterIP) != 0 {
t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP)
}
}
func TestBuildServiceMapAddRemove(t *testing.T) {
fp := newFakeProxier()
services := []*api.Service{
makeTestService("ns2", "cluster-ip", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeClusterIP
svc.Spec.ClusterIP = "172.16.55.4"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port1", "UDP", 1234, 4321, 0)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port2", "UDP", 1235, 5321, 0)
}),
makeTestService("ns2", "node-port", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeNodePort
svc.Spec.ClusterIP = "172.16.55.10"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port1", "UDP", 345, 678, 0)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port2", "TCP", 344, 677, 0)
}),
makeTestService("ns1", "load-balancer", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeLoadBalancer
svc.Spec.ClusterIP = "172.16.55.11"
svc.Spec.LoadBalancerIP = "5.6.7.8"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar", "UDP", 8675, 30061, 7000)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8676, 30062, 7001)
svc.Status.LoadBalancer = api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
{IP: "10.1.2.4"},
},
}
}),
makeTestService("ns1", "only-local-load-balancer", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeLoadBalancer
svc.Spec.ClusterIP = "172.16.55.12"
svc.Spec.LoadBalancerIP = "5.6.7.8"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar2", "UDP", 8677, 30063, 7002)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8678, 30064, 7003)
svc.Status.LoadBalancer = api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
{IP: "10.1.2.3"},
},
}
svc.Spec.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyTypeLocal
svc.Spec.HealthCheckNodePort = 345
}),
}
for i := range services {
fp.addService(services[i])
}
result := UpdateServiceMap(fp.serviceMap, fp.serviceChanges)
if len(fp.serviceMap) != 8 {
t.Errorf("expected service map length 2, got %v", fp.serviceMap)
}
// The only-local-loadbalancer ones get added
if len(result.HCServiceNodePorts) != 1 {
t.Errorf("expected 1 healthcheck port, got %v", result.HCServiceNodePorts)
} else {
nsn := makeNSN("ns1", "only-local-load-balancer")
if port, found := result.HCServiceNodePorts[nsn]; !found || port != 345 {
t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, result.HCServiceNodePorts)
}
}
if len(result.UDPStaleClusterIP) != 0 {
// Services only added, so nothing stale yet
t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
}
// Remove some stuff
// oneService is a modification of services[0] with removed first port.
oneService := makeTestService("ns2", "cluster-ip", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeClusterIP
svc.Spec.ClusterIP = "172.16.55.4"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p2", "UDP", 1235, 5321, 0)
})
fp.updateService(services[0], oneService)
fp.deleteService(services[1])
fp.deleteService(services[2])
fp.deleteService(services[3])
result = UpdateServiceMap(fp.serviceMap, fp.serviceChanges)
if len(fp.serviceMap) != 1 {
t.Errorf("expected service map length 1, got %v", fp.serviceMap)
}
if len(result.HCServiceNodePorts) != 0 {
t.Errorf("expected 0 healthcheck ports, got %v", result.HCServiceNodePorts)
}
// All services but one were deleted. While you'd expect only the ClusterIPs
// from the three deleted services here, we still have the ClusterIP for
	// the not-deleted service, because one of its ServicePorts was deleted.
expectedStaleUDPServices := []string{"172.16.55.10", "172.16.55.4", "172.16.55.11", "172.16.55.12"}
if len(result.UDPStaleClusterIP) != len(expectedStaleUDPServices) {
t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.UDPStaleClusterIP.UnsortedList())
}
for _, ip := range expectedStaleUDPServices {
if !result.UDPStaleClusterIP.Has(ip) {
t.Errorf("expected stale UDP service service %s", ip)
}
}
}
func TestBuildServiceMapServiceUpdate(t *testing.T) {
fp := newFakeProxier()
servicev1 := makeTestService("ns1", "svc1", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeClusterIP
svc.Spec.ClusterIP = "172.16.55.4"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p1", "UDP", 1234, 4321, 0)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p2", "TCP", 1235, 5321, 0)
})
servicev2 := makeTestService("ns1", "svc1", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeLoadBalancer
svc.Spec.ClusterIP = "172.16.55.4"
svc.Spec.LoadBalancerIP = "5.6.7.8"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p1", "UDP", 1234, 4321, 7002)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p2", "TCP", 1235, 5321, 7003)
svc.Status.LoadBalancer = api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
{IP: "10.1.2.3"},
},
}
svc.Spec.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyTypeLocal
svc.Spec.HealthCheckNodePort = 345
})
fp.addService(servicev1)
result := UpdateServiceMap(fp.serviceMap, fp.serviceChanges)
if len(fp.serviceMap) != 2 {
t.Errorf("expected service map length 2, got %v", fp.serviceMap)
}
if len(result.HCServiceNodePorts) != 0 {
t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
}
if len(result.UDPStaleClusterIP) != 0 {
// Services only added, so nothing stale yet
t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
}
// Change service to load-balancer
fp.updateService(servicev1, servicev2)
result = UpdateServiceMap(fp.serviceMap, fp.serviceChanges)
if len(fp.serviceMap) != 2 {
t.Errorf("expected service map length 2, got %v", fp.serviceMap)
}
if len(result.HCServiceNodePorts) != 1 {
t.Errorf("expected healthcheck ports length 1, got %v", result.HCServiceNodePorts)
}
if len(result.UDPStaleClusterIP) != 0 {
t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP.UnsortedList())
}
// No change; make sure the service map stays the same and there are
// no health-check changes
fp.updateService(servicev2, servicev2)
result = UpdateServiceMap(fp.serviceMap, fp.serviceChanges)
if len(fp.serviceMap) != 2 {
t.Errorf("expected service map length 2, got %v", fp.serviceMap)
}
if len(result.HCServiceNodePorts) != 1 {
t.Errorf("expected healthcheck ports length 1, got %v", result.HCServiceNodePorts)
}
if len(result.UDPStaleClusterIP) != 0 {
t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP.UnsortedList())
}
// And back to ClusterIP
fp.updateService(servicev2, servicev1)
result = UpdateServiceMap(fp.serviceMap, fp.serviceChanges)
if len(fp.serviceMap) != 2 {
t.Errorf("expected service map length 2, got %v", fp.serviceMap)
}
if len(result.HCServiceNodePorts) != 0 {
t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
}
if len(result.UDPStaleClusterIP) != 0 {
// Services only added, so nothing stale yet
t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
}
}


@ -1,79 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"fmt"
"k8s.io/apimachinery/pkg/types"
api "k8s.io/kubernetes/pkg/apis/core"
)
// ProxyProvider is the interface provided by proxier implementations.
type ProxyProvider interface {
// Sync immediately synchronizes the ProxyProvider's current state to proxy rules.
Sync()
// SyncLoop runs periodic work.
// This is expected to run as a goroutine or as the main loop of the app.
// It does not return.
SyncLoop()
}
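// Example (illustrative sketch, not in the original file): how a caller might
// drive a ProxyProvider. The ticker-based forced resync is a hypothetical
// detail; real proxiers wire periodic syncs up differently.
//
//	func run(p ProxyProvider) {
//		go func() {
//			for range time.Tick(30 * time.Second) {
//				p.Sync() // force a periodic resync
//			}
//		}()
//		p.SyncLoop() // never returns
//	}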
// ServicePortName carries a namespace + name + portname. This is the unique
// identifier for a load-balanced service.
type ServicePortName struct {
types.NamespacedName
Port string
}
func (spn ServicePortName) String() string {
return fmt.Sprintf("%s:%s", spn.NamespacedName.String(), spn.Port)
}
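// For example (illustrative): ServicePortName{NamespacedName:
// types.NamespacedName{Namespace: "ns", Name: "svc"}, Port: "http"}
// renders as "ns/svc:http".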
// ServicePort is an interface which abstracts information about a service.
type ServicePort interface {
// String returns service string. An example format can be: `IP:Port/Protocol`.
String() string
// ClusterIPString returns service cluster IP in string format.
ClusterIPString() string
// GetProtocol returns service protocol.
GetProtocol() api.Protocol
	// GetHealthCheckNodePort returns the service health check node port if present; a return value of 0 means it is not set.
GetHealthCheckNodePort() int
}
// Endpoint is an interface which abstracts information about an endpoint.
// TODO: Rename functions to be consistent with ServicePort.
type Endpoint interface {
// String returns endpoint string. An example format can be: `IP:Port`.
// We take the returned value as ServiceEndpoint.Endpoint.
String() string
	// GetIsLocal returns true if the endpoint is running on the same host as kube-proxy, otherwise returns false.
GetIsLocal() bool
// IP returns IP part of the endpoint.
IP() string
// Port returns the Port part of the endpoint.
Port() (int, error)
// Equal checks if two endpoints are equal.
Equal(Endpoint) bool
}
// ServiceEndpoint is used to identify a service and one of its endpoint pair.
type ServiceEndpoint struct {
Endpoint string
ServicePortName ServicePortName
}
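// Example (illustrative sketch, not in the original file): a minimal type
// satisfying the Endpoint interface. Real proxier implementations provide
// richer types; this only demonstrates the contract. Assumes "net" and
// "strconv" are imported.
//
//	type simpleEndpoint struct {
//		addr    string // "IP:Port"
//		isLocal bool
//	}
//
//	func (e *simpleEndpoint) String() string   { return e.addr }
//	func (e *simpleEndpoint) GetIsLocal() bool { return e.isLocal }
//	func (e *simpleEndpoint) IP() string {
//		host, _, _ := net.SplitHostPort(e.addr)
//		return host
//	}
//	func (e *simpleEndpoint) Port() (int, error) {
//		_, port, err := net.SplitHostPort(e.addr)
//		if err != nil {
//			return 0, err
//		}
//		return strconv.Atoi(port)
//	}
//	func (e *simpleEndpoint) Equal(other Endpoint) bool {
//		return e.String() == other.String() && e.GetIsLocal() == other.GetIsLocal()
//	}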


@ -1,138 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"loadbalancer.go",
"port_allocator.go",
"proxier.go",
"proxysocket.go",
"roundrobin.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"rlimit_windows.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/proxy/userspace",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/helper:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/util:go_default_library",
"//pkg/util/conntrack:go_default_library",
"//pkg/util/iptables:go_default_library",
"//pkg/util/slice:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"//vendor/golang.org/x/sys/unix:go_default_library",
],
"@io_bazel_rules_go//go/platform:darwin": [
"//vendor/golang.org/x/sys/unix:go_default_library",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"//vendor/golang.org/x/sys/unix:go_default_library",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"//vendor/golang.org/x/sys/unix:go_default_library",
],
"@io_bazel_rules_go//go/platform:linux": [
"//vendor/golang.org/x/sys/unix:go_default_library",
],
"@io_bazel_rules_go//go/platform:nacl": [
"//vendor/golang.org/x/sys/unix:go_default_library",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"//vendor/golang.org/x/sys/unix:go_default_library",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"//vendor/golang.org/x/sys/unix:go_default_library",
],
"@io_bazel_rules_go//go/platform:plan9": [
"//vendor/golang.org/x/sys/unix:go_default_library",
],
"@io_bazel_rules_go//go/platform:solaris": [
"//vendor/golang.org/x/sys/unix:go_default_library",
],
"//conditions:default": [],
}),
)
go_test(
name = "go_default_test",
srcs = [
"port_allocator_test.go",
"proxier_test.go",
"roundrobin_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/util/iptables/testing:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@ -1,5 +0,0 @@
reviewers:
- thockin
- lavalamp
- smarterclayton
- freehan


@ -1,34 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
"net"
)
// LoadBalancer is an interface for distributing incoming requests to service endpoints.
type LoadBalancer interface {
// NextEndpoint returns the endpoint to handle a request for the given
// service-port and source address.
NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error)
NewService(service proxy.ServicePortName, sessionAffinityType api.ServiceAffinity, stickyMaxAgeSeconds int) error
DeleteService(service proxy.ServicePortName)
CleanupStaleStickySessions(service proxy.ServicePortName)
ServiceHasEndpoints(service proxy.ServicePortName) bool
}
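// Example (illustrative sketch, not in the original file): how a proxy socket
// might drive the LoadBalancer interface. The retry-with-affinity-reset loop
// mirrors the userspace proxy's general behavior, but tryConnect itself is a
// hypothetical name and the function assumes "fmt" is imported.
//
//	func tryConnect(service proxy.ServicePortName, srcAddr net.Addr, lb LoadBalancer) (net.Conn, error) {
//		for _, reset := range []bool{false, true} {
//			endpoint, err := lb.NextEndpoint(service, srcAddr, reset)
//			if err != nil {
//				return nil, err
//			}
//			conn, err := net.Dial("tcp", endpoint)
//			if err == nil {
//				return conn, nil
//			}
//			// Dial failed: retry once with sessionAffinityReset=true so
//			// the balancer may pick a different endpoint.
//		}
//		return nil, fmt.Errorf("no reachable endpoints for %v", service)
//	}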


@ -1,158 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
import (
"errors"
"math/big"
"math/rand"
"sync"
"time"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
)
var (
errPortRangeNoPortsRemaining = errors.New("port allocation failed; there are no remaining ports left to allocate in the accepted range")
)
type PortAllocator interface {
AllocateNext() (int, error)
Release(int)
}
// randomAllocator is a PortAllocator implementation that returns a port value of 0 for
// every call to AllocateNext(), leaving the operating system to pick a random free port.
type randomAllocator struct{}
// AllocateNext always returns 0
func (r *randomAllocator) AllocateNext() (int, error) {
return 0, nil
}
// Release is a noop
func (r *randomAllocator) Release(_ int) {
// noop
}
// newPortAllocator builds PortAllocator for a given PortRange. If the PortRange is empty
// then a random port allocator is returned; otherwise, a new range-based allocator
// is returned.
func newPortAllocator(r net.PortRange) PortAllocator {
if r.Base == 0 {
return &randomAllocator{}
}
return newPortRangeAllocator(r, true)
}
const (
portsBufSize = 16
nextFreePortCooldown = 500 * time.Millisecond
allocateNextTimeout = 1 * time.Second
)
type rangeAllocator struct {
net.PortRange
ports chan int
used big.Int
lock sync.Mutex
rand *rand.Rand
}
func newPortRangeAllocator(r net.PortRange, autoFill bool) PortAllocator {
if r.Base == 0 || r.Size == 0 {
panic("illegal argument: may not specify an empty port range")
}
ra := &rangeAllocator{
PortRange: r,
ports: make(chan int, portsBufSize),
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
}
if autoFill {
go wait.Forever(func() { ra.fillPorts() }, nextFreePortCooldown)
}
return ra
}
// fillPorts loops, always searching for the next free port and, if one is found, pushing
// it into the ports buffer. It blocks on the buffered channel when the buffer is full,
// and returns only once there are no remaining free ports.
func (r *rangeAllocator) fillPorts() {
for {
if !r.fillPortsOnce() {
return
}
}
}
func (r *rangeAllocator) fillPortsOnce() bool {
port := r.nextFreePort()
if port == -1 {
return false
}
r.ports <- port
return true
}
// nextFreePort finds a free port, first picking a random port. If that port is already in use
// then the port range is scanned sequentially until either a free port is found or the scan
// completes unsuccessfully. An unsuccessful scan returns a port of -1.
func (r *rangeAllocator) nextFreePort() int {
r.lock.Lock()
defer r.lock.Unlock()
// choose random port
j := r.rand.Intn(r.Size)
if b := r.used.Bit(j); b == 0 {
r.used.SetBit(&r.used, j, 1)
return j + r.Base
}
// search sequentially
for i := j + 1; i < r.Size; i++ {
if b := r.used.Bit(i); b == 0 {
r.used.SetBit(&r.used, i, 1)
return i + r.Base
}
}
for i := 0; i < j; i++ {
if b := r.used.Bit(i); b == 0 {
r.used.SetBit(&r.used, i, 1)
return i + r.Base
}
}
return -1
}
func (r *rangeAllocator) AllocateNext() (port int, err error) {
select {
case port = <-r.ports:
case <-time.After(allocateNextTimeout):
err = errPortRangeNoPortsRemaining
}
return
}
func (r *rangeAllocator) Release(port int) {
port -= r.Base
if port < 0 || port >= r.Size {
return
}
r.lock.Lock()
defer r.lock.Unlock()
r.used.SetBit(&r.used, port, 0)
}
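// Example (illustrative, not in the original file): typical allocator usage.
// The port range literal is hypothetical.
//
//	ra := newPortRangeAllocator(net.PortRange{Base: 30000, Size: 1000}, true)
//	port, err := ra.AllocateNext() // blocks up to allocateNextTimeout
//	if err != nil {
//		return err // range exhausted: errPortRangeNoPortsRemaining
//	}
//	defer ra.Release(port) // return the port to the pool when done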


@ -1,178 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
import (
"reflect"
"testing"
"k8s.io/apimachinery/pkg/util/net"
)
func TestRangeAllocatorEmpty(t *testing.T) {
r := &net.PortRange{}
r.Set("0-0")
defer func() {
if rv := recover(); rv == nil {
t.Fatalf("expected panic because of empty port range: %#v", r)
}
}()
_ = newPortRangeAllocator(*r, true)
}
func TestRangeAllocatorFullyAllocated(t *testing.T) {
r := &net.PortRange{}
r.Set("1-1")
// Don't auto-fill ports, we'll manually turn the crank
pra := newPortRangeAllocator(*r, false)
a := pra.(*rangeAllocator)
// Fill in the one available port
if !a.fillPortsOnce() {
t.Fatalf("Expected to be able to fill ports")
}
// There should be no ports available
if a.fillPortsOnce() {
t.Fatalf("Expected to be unable to fill ports")
}
p, err := a.AllocateNext()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if p != 1 {
t.Fatalf("unexpected allocated port: %d", p)
}
a.lock.Lock()
if bit := a.used.Bit(p - a.Base); bit != 1 {
a.lock.Unlock()
t.Fatalf("unexpected used bit for allocated port: %d", p)
}
a.lock.Unlock()
_, err = a.AllocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range")
}
a.Release(p)
a.lock.Lock()
if bit := a.used.Bit(p - a.Base); bit != 0 {
a.lock.Unlock()
t.Fatalf("unexpected used bit for allocated port: %d", p)
}
a.lock.Unlock()
// Fill in the one available port
if !a.fillPortsOnce() {
t.Fatalf("Expected to be able to fill ports")
}
p, err = a.AllocateNext()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if p != 1 {
t.Fatalf("unexpected allocated port: %d", p)
}
a.lock.Lock()
if bit := a.used.Bit(p - a.Base); bit != 1 {
a.lock.Unlock()
t.Fatalf("unexpected used bit for allocated port: %d", p)
}
a.lock.Unlock()
_, err = a.AllocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range")
}
}
func TestRangeAllocator_RandomishAllocation(t *testing.T) {
r := &net.PortRange{}
r.Set("1-100")
pra := newPortRangeAllocator(*r, false)
a := pra.(*rangeAllocator)
// allocate all the ports
var err error
ports := make([]int, 100, 100)
for i := 0; i < 100; i++ {
if !a.fillPortsOnce() {
t.Fatalf("Expected to be able to fill ports")
}
ports[i], err = a.AllocateNext()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if ports[i] < 1 || ports[i] > 100 {
t.Fatalf("unexpected allocated port: %d", ports[i])
}
a.lock.Lock()
if bit := a.used.Bit(ports[i] - a.Base); bit != 1 {
a.lock.Unlock()
t.Fatalf("unexpected used bit for allocated port: %d", ports[i])
}
a.lock.Unlock()
}
if a.fillPortsOnce() {
t.Fatalf("Expected to be unable to fill ports")
}
// release them all
for i := 0; i < 100; i++ {
a.Release(ports[i])
a.lock.Lock()
if bit := a.used.Bit(ports[i] - a.Base); bit != 0 {
a.lock.Unlock()
t.Fatalf("unexpected used bit for allocated port: %d", ports[i])
}
a.lock.Unlock()
}
// allocate the ports again
rports := make([]int, 100, 100)
for i := 0; i < 100; i++ {
if !a.fillPortsOnce() {
t.Fatalf("Expected to be able to fill ports")
}
rports[i], err = a.AllocateNext()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if rports[i] < 1 || rports[i] > 100 {
t.Fatalf("unexpected allocated port: %d", rports[i])
}
a.lock.Lock()
if bit := a.used.Bit(rports[i] - a.Base); bit != 1 {
a.lock.Unlock()
t.Fatalf("unexpected used bit for allocated port: %d", rports[i])
}
a.lock.Unlock()
}
if a.fillPortsOnce() {
t.Fatalf("Expected to be unable to fill ports")
}
if reflect.DeepEqual(ports, rports) {
t.Fatalf("expected re-allocated ports to be in a somewhat random order")
}
}

File diff suppressed because it is too large


@ -1,948 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
import (
"fmt"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"net/url"
"os"
"strconv"
"sync/atomic"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
ipttest "k8s.io/kubernetes/pkg/util/iptables/testing"
"k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing"
)
const (
udpIdleTimeoutForTest = 250 * time.Millisecond
)
func joinHostPort(host string, port int) string {
return net.JoinHostPort(host, fmt.Sprintf("%d", port))
}
func waitForClosedPortTCP(p *Proxier, proxyPort int) error {
for i := 0; i < 50; i++ {
conn, err := net.Dial("tcp", joinHostPort("", proxyPort))
if err != nil {
return nil
}
conn.Close()
time.Sleep(1 * time.Millisecond)
}
return fmt.Errorf("port %d still open", proxyPort)
}
func waitForClosedPortUDP(p *Proxier, proxyPort int) error {
for i := 0; i < 50; i++ {
conn, err := net.Dial("udp", joinHostPort("", proxyPort))
if err != nil {
return nil
}
conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
		// To detect a closed UDP port, write and then read.
_, err = conn.Write([]byte("x"))
if err != nil {
if e, ok := err.(net.Error); ok && !e.Timeout() {
return nil
}
}
var buf [4]byte
_, err = conn.Read(buf[0:])
if err != nil {
if e, ok := err.(net.Error); ok && !e.Timeout() {
return nil
}
}
conn.Close()
time.Sleep(1 * time.Millisecond)
}
return fmt.Errorf("port %d still open", proxyPort)
}
// udpEchoServer is a simple UDP echo server, intended for testing the proxy.
type udpEchoServer struct {
net.PacketConn
}
func newUDPEchoServer() (*udpEchoServer, error) {
packetconn, err := net.ListenPacket("udp", ":0")
if err != nil {
return nil, err
}
return &udpEchoServer{packetconn}, nil
}
func (r *udpEchoServer) Loop() {
var buffer [4096]byte
for {
n, cliAddr, err := r.ReadFrom(buffer[0:])
if err != nil {
fmt.Printf("ReadFrom failed: %v\n", err)
continue
}
r.WriteTo(buffer[0:n], cliAddr)
}
}
var tcpServerPort int32
var udpServerPort int32
func TestMain(m *testing.M) {
// Don't handle panics
runtime.ReallyCrash = true
// TCP setup.
tcp := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(r.URL.Path[1:]))
}))
defer tcp.Close()
u, err := url.Parse(tcp.URL)
if err != nil {
panic(fmt.Sprintf("failed to parse: %v", err))
}
_, port, err := net.SplitHostPort(u.Host)
if err != nil {
panic(fmt.Sprintf("failed to parse: %v", err))
}
tcpServerPortValue, err := strconv.Atoi(port)
if err != nil {
panic(fmt.Sprintf("failed to atoi(%s): %v", port, err))
}
tcpServerPort = int32(tcpServerPortValue)
// UDP setup.
udp, err := newUDPEchoServer()
if err != nil {
panic(fmt.Sprintf("failed to make a UDP server: %v", err))
}
_, port, err = net.SplitHostPort(udp.LocalAddr().String())
if err != nil {
panic(fmt.Sprintf("failed to parse: %v", err))
}
udpServerPortValue, err := strconv.Atoi(port)
if err != nil {
panic(fmt.Sprintf("failed to atoi(%s): %v", port, err))
}
udpServerPort = int32(udpServerPortValue)
go udp.Loop()
ret := m.Run()
// it should be safe to call Close() multiple times.
tcp.Close()
os.Exit(ret)
}
func testEchoTCP(t *testing.T, address string, port int) {
path := "aaaaa"
res, err := http.Get("http://" + address + ":" + fmt.Sprintf("%d", port) + "/" + path)
if err != nil {
t.Fatalf("error connecting to server: %v", err)
}
defer res.Body.Close()
data, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Errorf("error reading data: %v %v", err, string(data))
}
if string(data) != path {
t.Errorf("expected: %s, got %s", path, string(data))
}
}
func testEchoUDP(t *testing.T, address string, port int) {
data := "abc123"
conn, err := net.Dial("udp", joinHostPort(address, port))
if err != nil {
t.Fatalf("error connecting to server: %v", err)
}
if _, err := conn.Write([]byte(data)); err != nil {
t.Fatalf("error sending to server: %v", err)
}
var resp [1024]byte
n, err := conn.Read(resp[0:])
if err != nil {
t.Errorf("error receiving data: %v", err)
}
if string(resp[0:n]) != data {
t.Errorf("expected: %s, got %s", data, string(resp[0:n]))
}
}
func waitForNumProxyLoops(t *testing.T, p *Proxier, want int32) {
var got int32
for i := 0; i < 600; i++ {
got = atomic.LoadInt32(&p.numProxyLoops)
if got == want {
return
}
time.Sleep(100 * time.Millisecond)
}
t.Errorf("expected %d ProxyLoops running, got %d", want, got)
}
func waitForNumProxyClients(t *testing.T, s *ServiceInfo, want int, timeout time.Duration) {
var got int
now := time.Now()
deadline := now.Add(timeout)
for time.Now().Before(deadline) {
s.ActiveClients.Mu.Lock()
got = len(s.ActiveClients.Clients)
s.ActiveClients.Mu.Unlock()
if got == want {
return
}
time.Sleep(500 * time.Millisecond)
}
t.Errorf("expected %d ProxyClients live, got %d", want, got)
}
func TestTCPProxy(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
}
func TestUDPProxy(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
}
func TestUDPProxyTimeout(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
waitForNumProxyLoops(t, p, 1)
testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
// When connecting to a UDP service endpoint, there should be a Conn for proxy.
waitForNumProxyClients(t, svcInfo, 1, time.Second)
// If conn has no activity for serviceInfo.timeout since last Read/Write, it should be closed because of timeout.
waitForNumProxyClients(t, svcInfo, 0, 2*time.Second)
}
func TestMultiPortProxy(t *testing.T) {
lb := NewLoadBalancerRR()
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-p"}, Port: "p"}
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-q"}, Port: "q"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Protocol: "TCP", Port: tcpServerPort}},
}},
})
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceQ.Name, Namespace: serviceQ.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "q", Protocol: "UDP", Port: udpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfoP, err := p.addServiceOnPort(serviceP, "TCP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoTCP(t, "127.0.0.1", svcInfoP.proxyPort)
waitForNumProxyLoops(t, p, 1)
svcInfoQ, err := p.addServiceOnPort(serviceQ, "UDP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoUDP(t, "127.0.0.1", svcInfoQ.proxyPort)
waitForNumProxyLoops(t, p, 2)
}
func TestMultiPortOnServiceAdd(t *testing.T) {
lb := NewLoadBalancerRR()
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "q"}
serviceX := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "x"}
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
p.OnServiceAdd(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: 80,
Protocol: "TCP",
}, {
Name: "q",
Port: 81,
Protocol: "UDP",
}}},
})
waitForNumProxyLoops(t, p, 2)
svcInfo, exists := p.getServiceInfo(serviceP)
if !exists {
t.Fatalf("can't find serviceInfo for %s", serviceP)
}
if svcInfo.portal.ip.String() != "1.2.3.4" || svcInfo.portal.port != 80 || svcInfo.protocol != "TCP" {
t.Errorf("unexpected serviceInfo for %s: %#v", serviceP, svcInfo)
}
svcInfo, exists = p.getServiceInfo(serviceQ)
if !exists {
t.Fatalf("can't find serviceInfo for %s", serviceQ)
}
if svcInfo.portal.ip.String() != "1.2.3.4" || svcInfo.portal.port != 81 || svcInfo.protocol != "UDP" {
t.Errorf("unexpected serviceInfo for %s: %#v", serviceQ, svcInfo)
}
svcInfo, exists = p.getServiceInfo(serviceX)
if exists {
t.Fatalf("found unwanted serviceInfo for %s: %#v", serviceX, svcInfo)
}
}
// Helper: Stops the proxy for the named service.
func stopProxyByName(proxier *Proxier, service proxy.ServicePortName) error {
info, found := proxier.getServiceInfo(service)
if !found {
return fmt.Errorf("unknown service: %s", service)
}
return proxier.stopProxy(service, info)
}
func TestTCPProxyStop(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
if !svcInfo.IsAlive() {
t.Fatalf("wrong value for IsAlive(): expected true")
}
conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort))
if err != nil {
t.Fatalf("error connecting to proxy: %v", err)
}
conn.Close()
waitForNumProxyLoops(t, p, 1)
stopProxyByName(p, service)
if svcInfo.IsAlive() {
t.Fatalf("wrong value for IsAlive(): expected false")
}
// Wait for the port to really close.
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
		t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
}
func TestUDPProxyStop(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
conn, err := net.Dial("udp", joinHostPort("", svcInfo.proxyPort))
if err != nil {
t.Fatalf("error connecting to proxy: %v", err)
}
conn.Close()
waitForNumProxyLoops(t, p, 1)
stopProxyByName(p, service)
// Wait for the port to really close.
if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
		t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
}
func TestTCPProxyUpdateDelete(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort))
if err != nil {
t.Fatalf("error connecting to proxy: %v", err)
}
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
}}},
})
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
		t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
}
func TestUDPProxyUpdateDelete(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
conn, err := net.Dial("udp", joinHostPort("", svcInfo.proxyPort))
if err != nil {
t.Fatalf("error connecting to proxy: %v", err)
}
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "UDP",
}}},
})
if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
		t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
}
func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
endpoint := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
}
lb.OnEndpointsAdd(endpoint)
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort))
if err != nil {
t.Fatalf("error connecting to proxy: %v", err)
}
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
}}},
})
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
// Re-add the endpoints here because they were cleaned up during the service delete.
lb.OnEndpointsAdd(endpoint)
p.OnServiceAdd(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
}}},
})
svcInfo, exists := p.getServiceInfo(service)
if !exists {
t.Fatalf("can't find serviceInfo for %s", service)
}
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
}
func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
endpoint := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
}
lb.OnEndpointsAdd(endpoint)
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
conn, err := net.Dial("udp", joinHostPort("", svcInfo.proxyPort))
if err != nil {
t.Fatalf("error connecting to proxy: %v", err)
}
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "UDP",
}}},
})
if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
// Re-add the endpoints here because they were cleaned up during the service delete.
lb.OnEndpointsAdd(endpoint)
p.OnServiceAdd(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "UDP",
}}},
})
svcInfo, exists := p.getServiceInfo(service)
if !exists {
t.Fatalf("can't find serviceInfo")
}
testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
}
func TestTCPProxyUpdatePort(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
p.OnServiceAdd(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: 99,
Protocol: "TCP",
}}},
})
// Wait for the socket to actually get free.
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
t.Fatal(err)
}
svcInfo, exists := p.getServiceInfo(service)
if !exists {
t.Fatalf("can't find serviceInfo")
}
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
// This is a bit async, but this should be sufficient.
time.Sleep(500 * time.Millisecond)
waitForNumProxyLoops(t, p, 1)
}
func TestUDPProxyUpdatePort(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
waitForNumProxyLoops(t, p, 1)
p.OnServiceAdd(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: 99,
Protocol: "UDP",
}}},
})
// Wait for the socket to actually get free.
if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
t.Fatal(err)
}
svcInfo, exists := p.getServiceInfo(service)
if !exists {
t.Fatalf("can't find serviceInfo")
}
testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
}
func TestProxyUpdatePublicIPs(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
p.OnServiceAdd(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.portal.port),
Protocol: "TCP",
}},
ClusterIP: svcInfo.portal.ip.String(),
ExternalIPs: []string{"4.3.2.1"},
},
})
// Wait for the socket to actually get free.
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
t.Fatal(err)
}
svcInfo, exists := p.getServiceInfo(service)
if !exists {
t.Fatalf("can't find serviceInfo")
}
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
// This is a bit async, but this should be sufficient.
time.Sleep(500 * time.Millisecond)
waitForNumProxyLoops(t, p, 1)
}
func TestProxyUpdatePortal(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
endpoint := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
}
lb.OnEndpointsAdd(endpoint)
fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
svcv0 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
}}},
}
svcv1 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
}}},
}
p.OnServiceUpdate(svcv0, svcv1)
_, exists := p.getServiceInfo(service)
if exists {
t.Fatalf("service with empty ClusterIP should not be included in the proxy")
}
svcv2 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "None", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
}}},
}
p.OnServiceUpdate(svcv1, svcv2)
_, exists = p.getServiceInfo(service)
if exists {
t.Fatalf("service with 'None' as ClusterIP should not be included in the proxy")
}
svcv3 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
}}},
}
p.OnServiceUpdate(svcv2, svcv3)
lb.OnEndpointsAdd(endpoint)
svcInfo, exists = p.getServiceInfo(service)
if !exists {
t.Fatalf("service with ClusterIP set not found in the proxy")
}
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
}
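// makeFakeExec returns a FakeExec scripted to report a single deleted
// conntrack flow entry, which is enough to satisfy the proxier's conntrack
// cleanup path in the tests above. (Editorial note, not in the original file.)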
func makeFakeExec() *fakeexec.FakeExec {
fcmd := fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil },
},
}
return &fakeexec.FakeExec{
CommandScript: []fakeexec.FakeCommandAction{
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
},
LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
}
}
// TODO(justinsb): Add test for nodePort conflict detection, once we have nodePort wired in

View File

@ -1,302 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
import (
"fmt"
"io"
"net"
"strconv"
"strings"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/runtime"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
)
// Abstraction over TCP/UDP sockets which are proxied.
type ProxySocket interface {
// Addr gets the net.Addr for a ProxySocket.
Addr() net.Addr
// Close stops the ProxySocket from accepting incoming connections.
// Each implementation should comment on the impact of calling Close
// while sessions are active.
Close() error
// ProxyLoop proxies incoming connections for the specified service to the service endpoints.
ProxyLoop(service proxy.ServicePortName, info *ServiceInfo, loadBalancer LoadBalancer)
// ListenPort returns the host port that the ProxySocket is listening on
ListenPort() int
}
func newProxySocket(protocol api.Protocol, ip net.IP, port int) (ProxySocket, error) {
host := ""
if ip != nil {
host = ip.String()
}
switch strings.ToUpper(string(protocol)) {
case "TCP":
listener, err := net.Listen("tcp", net.JoinHostPort(host, strconv.Itoa(port)))
if err != nil {
return nil, err
}
return &tcpProxySocket{Listener: listener, port: port}, nil
case "UDP":
addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(host, strconv.Itoa(port)))
if err != nil {
return nil, err
}
conn, err := net.ListenUDP("udp", addr)
if err != nil {
return nil, err
}
return &udpProxySocket{UDPConn: conn, port: port}, nil
}
return nil, fmt.Errorf("unknown protocol %q", protocol)
}
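// Editorial sketch (not part of the original file): how a caller might
// construct and use a ProxySocket. Port 0 asks the kernel for an ephemeral
// port; the actual bound address is then available via Addr(), while
// ListenPort() reports the port that was requested.
func exampleNewProxySocket() {
sock, err := newProxySocket(api.ProtocolTCP, net.ParseIP("127.0.0.1"), 0)
if err != nil {
glog.Errorf("could not create proxy socket: %v", err)
return
}
defer sock.Close()
glog.V(4).Infof("proxy socket bound to %v", sock.Addr())
}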
// EndpointDialTimeouts is the sequence of timeouts we try, in order, when dialing a backend endpoint.
var EndpointDialTimeouts = []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 2 * time.Second}
// tcpProxySocket implements ProxySocket. Close() is implemented by net.Listener. When Close() is called,
// no new connections are allowed but existing connections are left untouched.
type tcpProxySocket struct {
net.Listener
port int
}
func (tcp *tcpProxySocket) ListenPort() int {
return tcp.port
}
// TryConnectEndpoints attempts to connect to the next available endpoint for the given service, cycling
// through until it is able to successfully connect, or it has tried with all timeouts in EndpointDialTimeouts.
func TryConnectEndpoints(service proxy.ServicePortName, srcAddr net.Addr, protocol string, loadBalancer LoadBalancer) (out net.Conn, err error) {
sessionAffinityReset := false
for _, dialTimeout := range EndpointDialTimeouts {
endpoint, err := loadBalancer.NextEndpoint(service, srcAddr, sessionAffinityReset)
if err != nil {
glog.Errorf("Couldn't find an endpoint for %s: %v", service, err)
return nil, err
}
glog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint)
// TODO: This could spin up a new goroutine to make the outbound connection,
// and keep accepting inbound traffic.
outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout)
if err != nil {
if isTooManyFDsError(err) {
panic("Dial failed: " + err.Error())
}
glog.Errorf("Dial failed: %v", err)
sessionAffinityReset = true
continue
}
return outConn, nil
}
return nil, fmt.Errorf("failed to connect to an endpoint.")
}
func (tcp *tcpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *ServiceInfo, loadBalancer LoadBalancer) {
for {
if !myInfo.IsAlive() {
// The service port was closed or replaced.
return
}
// Block until a connection is made.
inConn, err := tcp.Accept()
if err != nil {
if isTooManyFDsError(err) {
panic("Accept failed: " + err.Error())
}
if isClosedError(err) {
return
}
if !myInfo.IsAlive() {
// The service port was just closed, so the accept failure is expected.
return
}
glog.Errorf("Accept failed: %v", err)
continue
}
glog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
outConn, err := TryConnectEndpoints(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", loadBalancer)
if err != nil {
glog.Errorf("Failed to connect to balancer: %v", err)
inConn.Close()
continue
}
// Spin up an async copy loop.
go ProxyTCP(inConn.(*net.TCPConn), outConn.(*net.TCPConn))
}
}
// ProxyTCP proxies data bi-directionally between in and out.
func ProxyTCP(in, out *net.TCPConn) {
var wg sync.WaitGroup
wg.Add(2)
glog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
go copyBytes("from backend", in, out, &wg)
go copyBytes("to backend", out, in, &wg)
wg.Wait()
}
func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) {
defer wg.Done()
glog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
n, err := io.Copy(dest, src)
if err != nil {
if !isClosedError(err) {
glog.Errorf("I/O error: %v", err)
}
}
glog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
dest.Close()
src.Close()
}
// udpProxySocket implements ProxySocket. Close() is implemented by net.UDPConn. When Close() is called,
// no new connections are allowed and existing connections are broken.
// TODO: We could lame-duck this ourselves, if it becomes important.
type udpProxySocket struct {
*net.UDPConn
port int
}
func (udp *udpProxySocket) ListenPort() int {
return udp.port
}
func (udp *udpProxySocket) Addr() net.Addr {
return udp.LocalAddr()
}
// Holds all the known UDP clients that have not timed out.
type ClientCache struct {
Mu sync.Mutex
Clients map[string]net.Conn // addr string -> connection
}
func newClientCache() *ClientCache {
return &ClientCache{Clients: map[string]net.Conn{}}
}
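// Note (editorial): callers must hold Mu whenever they read or write Clients;
// see getBackendConn below, which locks the cache before looking up or
// inserting a backend connection.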
func (udp *udpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *ServiceInfo, loadBalancer LoadBalancer) {
var buffer [4096]byte // 4KiB should be enough for most whole-packets
for {
if !myInfo.IsAlive() {
// The service port was closed or replaced.
break
}
// Block until data arrives.
// TODO: Accumulate a histogram of n or something, to fine tune the buffer size.
n, cliAddr, err := udp.ReadFrom(buffer[0:])
if err != nil {
if e, ok := err.(net.Error); ok {
if e.Temporary() {
glog.V(1).Infof("ReadFrom had a temporary failure: %v", err)
continue
}
}
glog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err)
break
}
// If this is a client we know already, reuse the connection and goroutine.
svrConn, err := udp.getBackendConn(myInfo.ActiveClients, cliAddr, loadBalancer, service, myInfo.Timeout)
if err != nil {
continue
}
// TODO: It would be nice to let the goroutine handle this write, but we don't
// really want to copy the buffer. We could do a pool of buffers or something.
_, err = svrConn.Write(buffer[0:n])
if err != nil {
if !logTimeout(err) {
glog.Errorf("Write failed: %v", err)
// TODO: Maybe tear down the goroutine for this client/server pair?
}
continue
}
err = svrConn.SetDeadline(time.Now().Add(myInfo.Timeout))
if err != nil {
glog.Errorf("SetDeadline failed: %v", err)
continue
}
}
}
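// Design note (editorial): the loop above multiplexes every client through a
// single ReadFrom on the listening socket. Replies flow back through the
// per-client goroutine started in getBackendConn, and idle client/backend
// pairs are reaped by the SetDeadline calls on both sides.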
func (udp *udpProxySocket) getBackendConn(activeClients *ClientCache, cliAddr net.Addr, loadBalancer LoadBalancer, service proxy.ServicePortName, timeout time.Duration) (net.Conn, error) {
activeClients.Mu.Lock()
defer activeClients.Mu.Unlock()
svrConn, found := activeClients.Clients[cliAddr.String()]
if !found {
// TODO: This could spin up a new goroutine to make the outbound connection,
// and keep accepting inbound traffic.
glog.V(3).Infof("New UDP connection from %s", cliAddr)
var err error
svrConn, err = TryConnectEndpoints(service, cliAddr, "udp", loadBalancer)
if err != nil {
return nil, err
}
if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil {
glog.Errorf("SetDeadline failed: %v", err)
return nil, err
}
activeClients.Clients[cliAddr.String()] = svrConn
go func(cliAddr net.Addr, svrConn net.Conn, activeClients *ClientCache, timeout time.Duration) {
defer runtime.HandleCrash()
udp.proxyClient(cliAddr, svrConn, activeClients, timeout)
}(cliAddr, svrConn, activeClients, timeout)
}
return svrConn, nil
}
// This function is expected to be called as a goroutine.
// TODO: Track and log bytes copied, like TCP
func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activeClients *ClientCache, timeout time.Duration) {
defer svrConn.Close()
var buffer [4096]byte
for {
n, err := svrConn.Read(buffer[0:])
if err != nil {
if !logTimeout(err) {
glog.Errorf("Read failed: %v", err)
}
break
}
err = svrConn.SetDeadline(time.Now().Add(timeout))
if err != nil {
glog.Errorf("SetDeadline failed: %v", err)
break
}
n, err = udp.WriteTo(buffer[0:n], cliAddr)
if err != nil {
if !logTimeout(err) {
glog.Errorf("WriteTo failed: %v", err)
}
break
}
}
activeClients.Mu.Lock()
delete(activeClients.Clients, cliAddr.String())
activeClients.Mu.Unlock()
}

View File

@ -1,25 +0,0 @@
// +build !windows
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
import "golang.org/x/sys/unix"
func setRLimit(limit uint64) error {
return unix.Setrlimit(unix.RLIMIT_NOFILE, &unix.Rlimit{Max: limit, Cur: limit})
}

View File

@ -1,23 +0,0 @@
// +build windows
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
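// setRLimit is a no-op on Windows, which has no direct equivalent of
// RLIMIT_NOFILE; the process file-handle limit is left unchanged.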
func setRLimit(limit uint64) error {
return nil
}

View File

@ -1,386 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
import (
"errors"
"fmt"
"net"
"reflect"
"strconv"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/util/slice"
)
var (
ErrMissingServiceEntry = errors.New("missing service entry")
ErrMissingEndpoints = errors.New("missing endpoints")
)
type affinityState struct {
clientIP string
//clientProtocol api.Protocol //not yet used
//sessionCookie string //not yet used
endpoint string
lastUsed time.Time
}
type affinityPolicy struct {
affinityType api.ServiceAffinity
affinityMap map[string]*affinityState // map client IP -> affinity info
ttlSeconds int
}
// LoadBalancerRR is a round-robin load balancer.
type LoadBalancerRR struct {
lock sync.RWMutex
services map[proxy.ServicePortName]*balancerState
}
// Ensure this implements LoadBalancer.
var _ LoadBalancer = &LoadBalancerRR{}
type balancerState struct {
endpoints []string // a list of "ip:port" style strings
index int // current index into endpoints
affinity affinityPolicy
}
func newAffinityPolicy(affinityType api.ServiceAffinity, ttlSeconds int) *affinityPolicy {
return &affinityPolicy{
affinityType: affinityType,
affinityMap: make(map[string]*affinityState),
ttlSeconds: ttlSeconds,
}
}
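// For example (illustrative): newAffinityPolicy(api.ServiceAffinityClientIP, 10800)
// yields ClientIP affinity with a 3-hour TTL, the same default that
// newServiceInternal below applies when ttlSeconds is 0.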
// NewLoadBalancerRR returns a new LoadBalancerRR.
func NewLoadBalancerRR() *LoadBalancerRR {
return &LoadBalancerRR{
services: map[proxy.ServicePortName]*balancerState{},
}
}
func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlSeconds int) error {
glog.V(4).Infof("LoadBalancerRR NewService %q", svcPort)
lb.lock.Lock()
defer lb.lock.Unlock()
lb.newServiceInternal(svcPort, affinityType, ttlSeconds)
return nil
}
// This assumes that lb.lock is already held.
func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlSeconds int) *balancerState {
if ttlSeconds == 0 {
ttlSeconds = int(api.DefaultClientIPServiceAffinitySeconds) // Default to 3 hours if not specified. TODO: should 0 mean unlimited?
}
if _, exists := lb.services[svcPort]; !exists {
lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlSeconds)}
glog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort)
} else if affinityType != "" {
lb.services[svcPort].affinity.affinityType = affinityType
}
return lb.services[svcPort]
}
func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
glog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort)
lb.lock.Lock()
defer lb.lock.Unlock()
delete(lb.services, svcPort)
}
// return true if this service is using some form of session affinity.
func isSessionAffinity(affinity *affinityPolicy) bool {
// Should never be empty string, but checking for it to be safe.
if affinity.affinityType == "" || affinity.affinityType == api.ServiceAffinityNone {
return false
}
return true
}
// ServiceHasEndpoints checks whether a service entry has endpoints.
func (lb *LoadBalancerRR) ServiceHasEndpoints(svcPort proxy.ServicePortName) bool {
lb.lock.Lock()
defer lb.lock.Unlock()
state, exists := lb.services[svcPort]
// TODO: while nothing ever assigns nil to the map, *some* of the code using the map
// checks for it. The code should all follow the same convention.
return exists && state != nil && len(state.endpoints) > 0
}
// NextEndpoint returns a service endpoint.
// The service endpoint is chosen using the round-robin algorithm.
func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error) {
// Coarse locking is simple. We can get more fine-grained if/when we
// can prove it matters.
lb.lock.Lock()
defer lb.lock.Unlock()
state, exists := lb.services[svcPort]
if !exists || state == nil {
return "", ErrMissingServiceEntry
}
if len(state.endpoints) == 0 {
return "", ErrMissingEndpoints
}
glog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints)
sessionAffinityEnabled := isSessionAffinity(&state.affinity)
var ipaddr string
if sessionAffinityEnabled {
// Caution: don't shadow ipaddr
var err error
ipaddr, _, err = net.SplitHostPort(srcAddr.String())
if err != nil {
return "", fmt.Errorf("malformed source address %q: %v", srcAddr.String(), err)
}
if !sessionAffinityReset {
sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
if exists && int(time.Since(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
// Affinity wins.
endpoint := sessionAffinity.endpoint
sessionAffinity.lastUsed = time.Now()
glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint)
return endpoint, nil
}
}
}
// Take the next endpoint.
endpoint := state.endpoints[state.index]
state.index = (state.index + 1) % len(state.endpoints)
if sessionAffinityEnabled {
affinity := state.affinity.affinityMap[ipaddr]
if affinity == nil {
affinity = new(affinityState) //&affinityState{ipaddr, "TCP", "", endpoint, time.Now()}
state.affinity.affinityMap[ipaddr] = affinity
}
affinity.lastUsed = time.Now()
affinity.endpoint = endpoint
affinity.clientIP = ipaddr
glog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr])
}
return endpoint, nil
}
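// Worked example (editorial): with endpoints ["a:1", "b:1", "c:1"] and no
// session affinity, successive calls return a:1, b:1, c:1, a:1, and so on.
// With ClientIP affinity, a caller that returns within the TTL window is
// handed its previous endpoint instead of advancing the round-robin index.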
type hostPortPair struct {
host string
port int
}
func isValidEndpoint(hpp *hostPortPair) bool {
return hpp.host != "" && hpp.port > 0
}
func flattenValidEndpoints(endpoints []hostPortPair) []string {
// Convert Endpoint objects into strings for easier use later. Ignore
// the protocol field - we'll get that from the Service objects.
var result []string
for i := range endpoints {
hpp := &endpoints[i]
if isValidEndpoint(hpp) {
result = append(result, net.JoinHostPort(hpp.host, strconv.Itoa(hpp.port)))
}
}
return result
}
// Remove any session affinity records associated with a particular endpoint (for example when a pod goes down).
func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) {
for _, affinity := range state.affinity.affinityMap {
if affinity.endpoint == endpoint {
glog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort)
delete(state.affinity.affinityMap, affinity.clientIP)
}
}
}
// Walk the new endpoints and the endpoints currently associated with the
// load balancer, then remove session affinity records for any endpoint
// that is not present in both lists.
// This assumes the lb.lock is held.
func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEndpoints []string) {
allEndpoints := map[string]int{}
for _, newEndpoint := range newEndpoints {
allEndpoints[newEndpoint] = 1
}
state, exists := lb.services[svcPort]
if !exists {
return
}
for _, existingEndpoint := range state.endpoints {
allEndpoints[existingEndpoint] = allEndpoints[existingEndpoint] + 1
}
for mKey, mVal := range allEndpoints {
if mVal == 1 {
glog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort)
removeSessionAffinityByEndpoint(state, svcPort, mKey)
}
}
}
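// Note on the counting scheme above (editorial): endpoints present in both
// the new and the existing list are counted twice, so a final count of 1
// marks an endpoint that appears in only one list; any session affinity
// records still pointing at it are removed.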
// buildPortsToEndpointsMap builds a map of portname -> all ip:ports for that
// portname. Explode Endpoints.Subsets[*] into this structure.
func buildPortsToEndpointsMap(endpoints *api.Endpoints) map[string][]hostPortPair {
portsToEndpoints := map[string][]hostPortPair{}
for i := range endpoints.Subsets {
ss := &endpoints.Subsets[i]
for i := range ss.Ports {
port := &ss.Ports[i]
for i := range ss.Addresses {
addr := &ss.Addresses[i]
portsToEndpoints[port.Name] = append(portsToEndpoints[port.Name], hostPortPair{addr.IP, int(port.Port)})
// Ignore the protocol field - we'll get that from the Service objects.
}
}
}
return portsToEndpoints
}
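// Worked example (editorial): a single subset with
// Addresses [{IP: "10.0.0.1"}, {IP: "10.0.0.2"}] and
// Ports [{Name: "p", Port: 80}, {Name: "q", Port: 443}] flattens to
// {"p": [{10.0.0.1 80}, {10.0.0.2 80}], "q": [{10.0.0.1 443}, {10.0.0.2 443}]}.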
func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *api.Endpoints) {
portsToEndpoints := buildPortsToEndpointsMap(endpoints)
lb.lock.Lock()
defer lb.lock.Unlock()
for portname := range portsToEndpoints {
svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
newEndpoints := flattenValidEndpoints(portsToEndpoints[portname])
state, exists := lb.services[svcPort]
if !exists || state == nil || len(newEndpoints) > 0 {
glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
lb.updateAffinityMap(svcPort, newEndpoints)
// OnEndpointsAdd can be called without NewService being called externally.
// To be safe we will call it here. A new service will only be created
// if one does not already exist. The affinity will be updated
// later, once NewService is called.
state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
state.endpoints = slice.ShuffleStrings(newEndpoints)
// Reset the round-robin index.
state.index = 0
}
}
}
func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
portsToEndpoints := buildPortsToEndpointsMap(endpoints)
oldPortsToEndpoints := buildPortsToEndpointsMap(oldEndpoints)
registeredEndpoints := make(map[proxy.ServicePortName]bool)
lb.lock.Lock()
defer lb.lock.Unlock()
for portname := range portsToEndpoints {
svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
newEndpoints := flattenValidEndpoints(portsToEndpoints[portname])
state, exists := lb.services[svcPort]
curEndpoints := []string{}
if state != nil {
curEndpoints = state.endpoints
}
if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints) {
glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
lb.updateAffinityMap(svcPort, newEndpoints)
// OnEndpointsUpdate can be called without NewService being called externally.
// To be safe we will call it here. A new service will only be created
// if one does not already exist. The affinity will be updated
// later, once NewService is called.
state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
state.endpoints = slice.ShuffleStrings(newEndpoints)
// Reset the round-robin index.
state.index = 0
}
registeredEndpoints[svcPort] = true
}
// Now remove all endpoints missing from the update.
for portname := range oldPortsToEndpoints {
svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: oldEndpoints.Namespace, Name: oldEndpoints.Name}, Port: portname}
if _, exists := registeredEndpoints[svcPort]; !exists {
lb.resetService(svcPort)
}
}
}
func (lb *LoadBalancerRR) resetService(svcPort proxy.ServicePortName) {
// If the service is still around, reset but don't delete.
if state, ok := lb.services[svcPort]; ok {
if len(state.endpoints) > 0 {
glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
state.endpoints = []string{}
}
state.index = 0
state.affinity.affinityMap = map[string]*affinityState{}
}
}
func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *api.Endpoints) {
portsToEndpoints := buildPortsToEndpointsMap(endpoints)
lb.lock.Lock()
defer lb.lock.Unlock()
for portname := range portsToEndpoints {
svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
lb.resetService(svcPort)
}
}
func (lb *LoadBalancerRR) OnEndpointsSynced() {
}
// Tests whether two slices are equivalent. This sorts both slices in-place.
func slicesEquiv(lhs, rhs []string) bool {
if len(lhs) != len(rhs) {
return false
}
return reflect.DeepEqual(slice.SortStrings(lhs), slice.SortStrings(rhs))
}
func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortName) {
lb.lock.Lock()
defer lb.lock.Unlock()
state, exists := lb.services[svcPort]
if !exists {
return
}
for ip, affinity := range state.affinity.affinityMap {
if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
delete(state.affinity.affinityMap, ip)
}
}
}

View File

@ -1,717 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userspace
import (
"net"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
)
func TestValidateWorks(t *testing.T) {
if isValidEndpoint(&hostPortPair{}) {
t.Errorf("Didn't fail for empty set")
}
if isValidEndpoint(&hostPortPair{host: "foobar"}) {
t.Errorf("Didn't fail with invalid port")
}
if isValidEndpoint(&hostPortPair{host: "foobar", port: -1}) {
t.Errorf("Didn't fail with a negative port")
}
if !isValidEndpoint(&hostPortPair{host: "foobar", port: 8080}) {
t.Errorf("Failed a valid config.")
}
}
func TestFilterWorks(t *testing.T) {
endpoints := []hostPortPair{
{host: "foobar", port: 1},
{host: "foobar", port: 2},
{host: "foobar", port: -1},
{host: "foobar", port: 3},
{host: "foobar", port: -2},
}
filtered := flattenValidEndpoints(endpoints)
if len(filtered) != 3 {
t.Errorf("Failed to filter to the correct size")
}
if filtered[0] != "foobar:1" {
t.Errorf("Index zero is not foobar:1")
}
if filtered[1] != "foobar:2" {
t.Errorf("Index one is not foobar:2")
}
if filtered[2] != "foobar:3" {
t.Errorf("Index two is not foobar:3")
}
}
func TestLoadBalanceFailsWithNoEndpoints(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "does-not-exist"}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil {
t.Errorf("Didn't fail with non-existent service")
}
if len(endpoint) != 0 {
t.Errorf("Got an endpoint")
}
}
func expectEndpoint(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
endpoint, err := loadBalancer.NextEndpoint(service, netaddr, false)
if err != nil {
t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
}
if endpoint != expected {
t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
}
}
func expectEndpointWithSessionAffinityReset(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
endpoint, err := loadBalancer.NextEndpoint(service, netaddr, true)
if err != nil {
t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
}
if endpoint != expected {
t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
}
}
func TestLoadBalanceWorksWithSingleEndpoint(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
Ports: []api.EndpointPort{{Name: "p", Port: 40}},
}},
}
loadBalancer.OnEndpointsAdd(endpoints)
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
}
func stringsInSlice(haystack []string, needles ...string) bool {
for _, needle := range needles {
found := false
for i := range haystack {
if haystack[i] == needle {
found = true
break
}
}
if !found {
return false
}
}
return true
}
func TestLoadBalanceWorksWithMultipleEndpoints(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}},
}},
}
loadBalancer.OnEndpointsAdd(endpoints)
shuffledEndpoints := loadBalancer.services[service].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint:1", "endpoint:2", "endpoint:3") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], nil)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil)
}
func TestLoadBalanceWorksWithMultipleEndpointsMultiplePorts(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}},
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
Ports: []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}},
},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
shuffledEndpoints := loadBalancer.services[serviceP].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:1", "endpoint3:3") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint1:2", "endpoint2:2", "endpoint3:4") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
}
func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpointsv1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint2"}},
Ports: []api.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
Ports: []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}},
},
},
}
loadBalancer.OnEndpointsAdd(endpointsv1)
shuffledEndpoints := loadBalancer.services[serviceP].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:2", "endpoint3:3") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint1:10", "endpoint2:20", "endpoint3:30") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
// Then update the configuration with one fewer endpoint, and make sure
// we start at the beginning again.
endpointsv2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint4"}},
Ports: []api.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint5"}},
Ports: []api.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}},
},
},
}
loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)
shuffledEndpoints = loadBalancer.services[serviceP].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint4:4", "endpoint5:5") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint4:40", "endpoint5:50") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
// Clear endpoints
endpointsv3 := &api.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil}
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
endpoint, err = loadBalancer.NextEndpoint(serviceP, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
}
func TestLoadBalanceWorksWithServiceRemoval(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
fooServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
barServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: "p"}
endpoint, err := loadBalancer.NextEndpoint(fooServiceP, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: fooServiceP.Name, Namespace: fooServiceP.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}},
Ports: []api.EndpointPort{{Name: "p", Port: 123}},
},
},
}
endpoints2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: barServiceP.Name, Namespace: barServiceP.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}},
Ports: []api.EndpointPort{{Name: "p", Port: 456}},
},
},
}
loadBalancer.OnEndpointsAdd(endpoints1)
loadBalancer.OnEndpointsAdd(endpoints2)
shuffledFooEndpoints := loadBalancer.services[fooServiceP].endpoints
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil)
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil)
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[2], nil)
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil)
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil)
shuffledBarEndpoints := loadBalancer.services[barServiceP].endpoints
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
// Then update the configuration by removing foo
loadBalancer.OnEndpointsDelete(endpoints1)
endpoint, err = loadBalancer.NextEndpoint(fooServiceP, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
// but bar is still there, and we continue RR from where we left off.
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
}
func TestStickyLoadBalanceWorksWithNewServiceCalledFirst(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
// Call NewService() before OnEndpointsUpdate()
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
ep1, err := loadBalancer.NextEndpoint(service, client1, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep1, client1)
ep2, err := loadBalancer.NextEndpoint(service, client2, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep2, client2)
ep3, err := loadBalancer.NextEndpoint(service, client3, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep3, client3)
}
func TestStickyLoadBalanceWorksWithNewServiceCalledSecond(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
// Call OnEndpointsUpdate() before NewService()
endpoints := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
ep1, err := loadBalancer.NextEndpoint(service, client1, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep1, client1)
ep2, err := loadBalancer.NextEndpoint(service, client2, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep2, client2)
ep3, err := loadBalancer.NextEndpoint(service, client3, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep3, client3)
}
func TestStickyLoadBalanceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
client4 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 4), Port: 0}
client5 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 5), Port: 0}
client6 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 6), Port: 0}
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpointsv1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
},
},
}
loadBalancer.OnEndpointsAdd(endpointsv1)
shuffledEndpoints := loadBalancer.services[service].endpoints
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
client1Endpoint := shuffledEndpoints[0]
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
client2Endpoint := shuffledEndpoints[1]
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
client3Endpoint := shuffledEndpoints[2]
endpointsv2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}},
},
},
}
loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)
shuffledEndpoints = loadBalancer.services[service].endpoints
if client1Endpoint == "endpoint:3" {
client1Endpoint = shuffledEndpoints[0]
} else if client2Endpoint == "endpoint:3" {
client2Endpoint = shuffledEndpoints[0]
} else if client3Endpoint == "endpoint:3" {
client3Endpoint = shuffledEndpoints[0]
}
expectEndpoint(t, loadBalancer, service, client1Endpoint, client1)
expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
endpointsv3 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}},
},
},
}
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
shuffledEndpoints = loadBalancer.services[service].endpoints
expectEndpoint(t, loadBalancer, service, client1Endpoint, client1)
expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client4)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client5)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client6)
}
func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpointsv1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
},
},
}
loadBalancer.OnEndpointsAdd(endpointsv1)
shuffledEndpoints := loadBalancer.services[service].endpoints
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
// Then update the configuration with one fewer endpoint and make sure
// we start from the beginning again
endpointsv2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 4}, {Port: 5}},
},
},
}
loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)
shuffledEndpoints = loadBalancer.services[service].endpoints
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
// Clear endpoints
endpointsv3 := &api.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil}
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
endpoint, err = loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
}
func TestStickyLoadBalanceWorksWithServiceRemoval(t *testing.T) {
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
loadBalancer := NewLoadBalancerRR()
fooService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
endpoint, err := loadBalancer.NextEndpoint(fooService, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
loadBalancer.NewService(fooService, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: fooService.Name, Namespace: fooService.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
},
},
}
barService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: ""}
loadBalancer.NewService(barService, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: barService.Name, Namespace: barService.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 4}, {Port: 5}},
},
},
}
loadBalancer.OnEndpointsAdd(endpoints1)
loadBalancer.OnEndpointsAdd(endpoints2)
shuffledFooEndpoints := loadBalancer.services[fooService].endpoints
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
shuffledBarEndpoints := loadBalancer.services[barService].endpoints
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
// Then update the configuration by removing foo
loadBalancer.OnEndpointsDelete(endpoints1)
endpoint, err = loadBalancer.NextEndpoint(fooService, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
// but bar is still there, and we continue RR from where we left off.
shuffledBarEndpoints = loadBalancer.services[barService].endpoints
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
}
func TestStickyLoadBalanceWorksWithEndpointFails(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
// Call NewService() before OnEndpointsUpdate()
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
ep1, err := loadBalancer.NextEndpoint(service, client1, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
ep2, err := loadBalancer.NextEndpoint(service, client2, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
ep3, err := loadBalancer.NextEndpoint(service, client3, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client1)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client1)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
}

View File

@ -1,57 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"endpoints.go",
"network.go",
"port.go",
"utils.go",
],
importpath = "k8s.io/kubernetes/pkg/proxy/util",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/helper:go_default_library",
"//pkg/util/net:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"endpoints_test.go",
"port_test.go",
"utils_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy/util/testing:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/proxy/util/testing:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,74 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"net"
"strconv"
"github.com/golang/glog"
)
// IPPart returns just the IP part of an IP or IP:port or endpoint string. If the IP
// part is an IPv6 address enclosed in brackets (e.g. "[fd00:1::5]:9999"),
// then the brackets are stripped as well.
func IPPart(s string) string {
if ip := net.ParseIP(s); ip != nil {
// IP address without port
return s
}
// Must be IP:port
host, _, err := net.SplitHostPort(s)
if err != nil {
glog.Errorf("Error parsing '%s': %v", s, err)
return ""
}
// Check if host string is a valid IP address
if ip := net.ParseIP(host); ip != nil {
return ip.String()
}
glog.Errorf("invalid IP part '%s'", host)
return ""
}
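// Illustrative usage (a sketch mirroring the cases exercised in endpoints_test.go):
//
//	IPPart("1.2.3.4")              // "1.2.3.4"
//	IPPart("1.2.3.4:9999")         // "1.2.3.4"
//	IPPart("[2001:db8::2:2]:9999") // "2001:db8::2:2"
//	IPPart("1.2.3.4::9999")        // "" (the parse error is logged)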
// PortPart returns just the port part of an endpoint string.
func PortPart(s string) (int, error) {
// Must be IP:port
_, port, err := net.SplitHostPort(s)
if err != nil {
glog.Errorf("Error parsing '%s': %v", s, err)
return -1, err
}
portNumber, err := strconv.Atoi(port)
if err != nil {
glog.Errorf("Error parsing '%s': %v", port, err)
return -1, err
}
return portNumber, nil
}
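// Illustrative usage (a sketch; a bare IP has no port, so SplitHostPort fails):
//
//	port, err := PortPart("1.2.3.4:9999") // 9999, nil
//	port, err = PortPart("1.2.3.4")       // -1, error (no port present)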
// ToCIDR returns a host address of the form <ip-address>/32 for
// IPv4 and <ip-address>/128 for IPv6
func ToCIDR(ip net.IP) string {
size := 32
if ip.To4() == nil {
size = 128
}
return fmt.Sprintf("%s/%d", ip.String(), size)
}
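// Illustrative usage (matching the cases in utils_test.go):
//
//	ToCIDR(net.ParseIP("1.2.3.4"))       // "1.2.3.4/32"
//	ToCIDR(net.ParseIP("2001:db8::1:1")) // "2001:db8::1:1/128"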

View File

@ -1,69 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"net"
"testing"
)
func TestIPPart(t *testing.T) {
const noError = ""
testCases := []struct {
endpoint string
expectedIP string
expectedError string
}{
{"1.2.3.4", "1.2.3.4", noError},
{"1.2.3.4:9999", "1.2.3.4", noError},
{"2001:db8::1:1", "2001:db8::1:1", noError},
{"[2001:db8::2:2]:9999", "2001:db8::2:2", noError},
{"1.2.3.4::9999", "", "too many colons"},
{"1.2.3.4:[0]", "", "unexpected '[' in address"},
{"1.2.3:8080", "", "invalid ip part"},
}
for _, tc := range testCases {
ip := IPPart(tc.endpoint)
if tc.expectedError == noError {
if ip != tc.expectedIP {
t.Errorf("Unexpected IP for %s: Expected: %s, Got %s", tc.endpoint, tc.expectedIP, ip)
}
} else if ip != "" {
t.Errorf("Error did not occur for %s, expected: '%s' error", tc.endpoint, tc.expectedError)
}
}
}
func TestToCIDR(t *testing.T) {
testCases := []struct {
ip string
expectedAddr string
}{
{"1.2.3.4", "1.2.3.4/32"},
{"2001:db8::1:1", "2001:db8::1:1/128"},
}
for _, tc := range testCases {
ip := net.ParseIP(tc.ip)
addr := ToCIDR(ip)
if addr != tc.expectedAddr {
t.Errorf("Unexpected host address for %s: Expected: %s, Got %s", tc.ip, tc.expectedAddr, addr)
}
}
}

View File

@ -1,45 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"net"
)
// NetworkInterfacer defines an interface for several net library functions. Production
// code will forward to net library functions, and unit tests will override the methods
// for testing purposes.
type NetworkInterfacer interface {
Addrs(intf *net.Interface) ([]net.Addr, error)
Interfaces() ([]net.Interface, error)
}
// RealNetwork implements the NetworkInterfacer interface for production code, just
// wrapping the underlying net library function calls.
type RealNetwork struct{}
// Addrs wraps net.Interface.Addrs(); it is part of the NetworkInterfacer interface.
func (RealNetwork) Addrs(intf *net.Interface) ([]net.Addr, error) {
return intf.Addrs()
}
// Interfaces wraps net.Interfaces(); it is part of the NetworkInterfacer interface.
func (RealNetwork) Interfaces() ([]net.Interface, error) {
return net.Interfaces()
}
var _ NetworkInterfacer = &RealNetwork{}
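// A minimal sketch of the injection pattern: production callers pass
// RealNetwork{}, while unit tests substitute the FakeNetwork defined in
// pkg/proxy/util/testing:
//
//	addrs, err := GetNodeAddresses([]string{"10.20.30.0/24"}, RealNetwork{})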

View File

@ -1,67 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"net"
"strconv"
"github.com/golang/glog"
)
// LocalPort describes a port on a specific IP address and protocol
type LocalPort struct {
// Description is the identity message of a given local port.
Description string
// IP is the IP address part of a given local port.
// If this string is empty, the port binds to all local IP addresses.
IP string
// Port is the port part of a given local port.
Port int
// Protocol is the protocol part of a given local port.
// The value is assumed to be lower-case. For example, "udp" not "UDP", "tcp" not "TCP".
Protocol string
}
func (lp *LocalPort) String() string {
ipPort := net.JoinHostPort(lp.IP, strconv.Itoa(lp.Port))
return fmt.Sprintf("%q (%s/%s)", lp.Description, ipPort, lp.Protocol)
}
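// Illustrative output (matching the cases in port_test.go; note the IPv6
// address is bracketed by net.JoinHostPort):
//
//	lp := &LocalPort{Description: "IPv6 TCP", IP: "2001:db8::1", Port: 80, Protocol: "tcp"}
//	lp.String() // "IPv6 TCP" ([2001:db8::1]:80/tcp)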
// Closeable is an interface around closing a port.
type Closeable interface {
Close() error
}
// PortOpener is an interface around port opening/closing.
// Abstracted out for testing.
type PortOpener interface {
OpenLocalPort(lp *LocalPort) (Closeable, error)
}
// RevertPorts closes the ports that are in replacementPortsMap but not in originalPortsMap. In other words,
// it only closes the ports that were opened in this sync.
func RevertPorts(replacementPortsMap, originalPortsMap map[LocalPort]Closeable) {
for k, v := range replacementPortsMap {
// Only close newly opened local ports - leave ones that were open before this update
if originalPortsMap[k] == nil {
glog.V(2).Infof("Closing local port %s", k.String())
v.Close()
}
}
}
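// A minimal sketch (sock1 and sock2 stand in for previously opened sockets):
// a port opened during this sync is closed, a port that was already open
// before the sync is left alone.
//
//	replacement := map[LocalPort]Closeable{{Port: 5001}: sock1, {Port: 5002}: sock2}
//	original := map[LocalPort]Closeable{{Port: 5001}: sock1}
//	RevertPorts(replacement, original) // closes only sock2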

View File

@ -1,143 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import "testing"
type fakeClosable struct {
closed bool
}
func (c *fakeClosable) Close() error {
c.closed = true
return nil
}
func TestLocalPortString(t *testing.T) {
testCases := []struct {
description string
ip string
port int
protocol string
expectedStr string
}{
{"IPv4 UDP", "1.2.3.4", 9999, "udp", "\"IPv4 UDP\" (1.2.3.4:9999/udp)"},
{"IPv4 TCP", "5.6.7.8", 1053, "tcp", "\"IPv4 TCP\" (5.6.7.8:1053/tcp)"},
{"IPv6 TCP", "2001:db8::1", 80, "tcp", "\"IPv6 TCP\" ([2001:db8::1]:80/tcp)"},
}
for _, tc := range testCases {
lp := &LocalPort{
Description: tc.description,
IP: tc.ip,
Port: tc.port,
Protocol: tc.protocol,
}
str := lp.String()
if str != tc.expectedStr {
t.Errorf("Unexpected output for %s, expected: %s, got: %s", tc.description, tc.expectedStr, str)
}
}
}
func TestRevertPorts(t *testing.T) {
testCases := []struct {
replacementPorts []LocalPort
existingPorts []LocalPort
expectToBeClose []bool
}{
{
replacementPorts: []LocalPort{
{Port: 5001},
{Port: 5002},
{Port: 5003},
},
existingPorts: []LocalPort{},
expectToBeClose: []bool{true, true, true},
},
{
replacementPorts: []LocalPort{},
existingPorts: []LocalPort{
{Port: 5001},
{Port: 5002},
{Port: 5003},
},
expectToBeClose: []bool{},
},
{
replacementPorts: []LocalPort{
{Port: 5001},
{Port: 5002},
{Port: 5003},
},
existingPorts: []LocalPort{
{Port: 5001},
{Port: 5002},
{Port: 5003},
},
expectToBeClose: []bool{false, false, false},
},
{
replacementPorts: []LocalPort{
{Port: 5001},
{Port: 5002},
{Port: 5003},
},
existingPorts: []LocalPort{
{Port: 5001},
{Port: 5003},
},
expectToBeClose: []bool{false, true, false},
},
{
replacementPorts: []LocalPort{
{Port: 5001},
{Port: 5002},
{Port: 5003},
},
existingPorts: []LocalPort{
{Port: 5001},
{Port: 5002},
{Port: 5003},
{Port: 5004},
},
expectToBeClose: []bool{false, false, false},
},
}
for i, tc := range testCases {
replacementPortsMap := make(map[LocalPort]Closeable)
for _, lp := range tc.replacementPorts {
replacementPortsMap[lp] = &fakeClosable{}
}
existingPortsMap := make(map[LocalPort]Closeable)
for _, lp := range tc.existingPorts {
existingPortsMap[lp] = &fakeClosable{}
}
RevertPorts(replacementPortsMap, existingPortsMap)
for j, expectation := range tc.expectToBeClose {
if replacementPortsMap[tc.replacementPorts[j]].(*fakeClosable).closed != expectation {
t.Errorf("Expect replacement localport %v to be %v in test case %v", tc.replacementPorts[j], expectation, i)
}
}
for _, lp := range tc.existingPorts {
if existingPortsMap[lp].(*fakeClosable).closed {
t.Errorf("Expected existing localport %v to remain open in test case %v", lp, i)
}
}
}
}

View File

@ -1,22 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["fake.go"],
importpath = "k8s.io/kubernetes/pkg/proxy/util/testing",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,65 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import "net"
// FakeNetwork implements the NetworkInterfacer interface for testing purposes.
type FakeNetwork struct {
NetworkInterfaces []net.Interface
// The key of the Address map is the network interface name
Address map[string][]net.Addr
}
// NewFakeNetwork initializes a FakeNetwork.
func NewFakeNetwork() *FakeNetwork {
return &FakeNetwork{
NetworkInterfaces: make([]net.Interface, 0),
Address: make(map[string][]net.Addr),
}
}
// AddInterfaceAddr creates an interface and its associated addresses for the FakeNetwork implementation.
func (f *FakeNetwork) AddInterfaceAddr(intf *net.Interface, addrs []net.Addr) {
f.NetworkInterfaces = append(f.NetworkInterfaces, *intf)
f.Address[intf.Name] = addrs
}
// Addrs is part of the NetworkInterfacer interface.
func (f *FakeNetwork) Addrs(intf *net.Interface) ([]net.Addr, error) {
return f.Address[intf.Name], nil
}
// Interfaces is part of the NetworkInterfacer interface.
func (f *FakeNetwork) Interfaces() ([]net.Interface, error) {
return f.NetworkInterfaces, nil
}
// AddrStruct implements the net.Addr interface for testing purposes.
type AddrStruct struct{ Val string }
// Network is part of the net.Addr interface.
func (a AddrStruct) Network() string {
return a.Val
}
// String is part of the net.Addr interface.
func (a AddrStruct) String() string {
return a.Val
}
var _ net.Addr = &AddrStruct{}
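// Illustrative test wiring (a sketch of how utils_test.go drives the fake):
//
//	fn := NewFakeNetwork()
//	eth0 := net.Interface{Index: 0, Name: "eth0"}
//	fn.AddInterfaceAddr(&eth0, []net.Addr{AddrStruct{Val: "10.20.30.51/24"}})
//	addrs, _ := fn.Addrs(&eth0) // [10.20.30.51/24]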

View File

@ -1,148 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"net"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
utilnet "k8s.io/kubernetes/pkg/util/net"
"github.com/golang/glog"
)
const (
IPv4ZeroCIDR = "0.0.0.0/0"
IPv6ZeroCIDR = "::/0"
)
// IsZeroCIDR checks whether the given CIDR is the IPv4 or IPv6 zero CIDR.
func IsZeroCIDR(cidr string) bool {
return cidr == IPv4ZeroCIDR || cidr == IPv6ZeroCIDR
}
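// Illustrative usage:
//
//	IsZeroCIDR("0.0.0.0/0")  // true
//	IsZeroCIDR("::/0")       // true
//	IsZeroCIDR("10.0.0.0/8") // false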
// IsLocalIP checks whether the given IP address is bound to one of the host's network interfaces.
func IsLocalIP(ip string) (bool, error) {
addrs, err := net.InterfaceAddrs()
if err != nil {
return false, err
}
for i := range addrs {
intf, _, err := net.ParseCIDR(addrs[i].String())
if err != nil {
return false, err
}
if net.ParseIP(ip).Equal(intf) {
return true, nil
}
}
return false, nil
}
// ShouldSkipService checks whether a given service should be skipped by the proxy.
func ShouldSkipService(svcName types.NamespacedName, service *api.Service) bool {
// if ClusterIP is "None" or empty, skip proxying
if !helper.IsServiceIPSet(service) {
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
return true
}
// Even if ClusterIP is set, ServiceTypeExternalName services don't get proxied
if service.Spec.Type == api.ServiceTypeExternalName {
glog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName)
return true
}
return false
}
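// Illustrative behavior (a sketch matching utils_test.go; name is a
// placeholder NamespacedName): headless and ExternalName services are
// skipped, anything with a concrete ClusterIP is proxied.
//
//	headless := &api.Service{Spec: api.ServiceSpec{ClusterIP: api.ClusterIPNone}}
//	ShouldSkipService(name, headless) // true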
// GetNodeAddresses returns all matched node IP addresses for the given cidr slice.
// Some callers, e.g. the IPVS proxier, need concrete IPs, not ranges, which is why this exists.
// NetworkInterfacer is injected for testing purposes.
// We expect the cidrs passed in to be already validated.
// Given an empty input `[]`, it will return `0.0.0.0/0` and `::/0` directly.
// If multiple cidrs are given, it will return the minimal IP set, e.g. given the input `[1.2.0.0/16, 0.0.0.0/0]`, it will
// only return `0.0.0.0/0`.
// NOTE: GetNodeAddresses only accepts CIDRs; if you want concrete IPs, e.g. 1.2.3.4, then the input should be 1.2.3.4/32.
func GetNodeAddresses(cidrs []string, nw NetworkInterfacer) (sets.String, error) {
uniqueAddressList := sets.NewString()
if len(cidrs) == 0 {
uniqueAddressList.Insert(IPv4ZeroCIDR)
uniqueAddressList.Insert(IPv6ZeroCIDR)
return uniqueAddressList, nil
}
// First round of iteration to pick out `0.0.0.0/0` or `::/0` for the sake of excluding non-zero IPs.
for _, cidr := range cidrs {
if IsZeroCIDR(cidr) {
uniqueAddressList.Insert(cidr)
}
}
// Second round of iteration to parse IPs based on cidr.
for _, cidr := range cidrs {
if IsZeroCIDR(cidr) {
continue
}
_, ipNet, _ := net.ParseCIDR(cidr)
itfs, err := nw.Interfaces()
if err != nil {
return nil, fmt.Errorf("error listing all interfaces from host, error: %v", err)
}
for _, itf := range itfs {
addrs, err := nw.Addrs(&itf)
if err != nil {
return nil, fmt.Errorf("error getting address from interface %s, error: %v", itf.Name, err)
}
for _, addr := range addrs {
if addr == nil {
continue
}
ip, _, err := net.ParseCIDR(addr.String())
if err != nil {
return nil, fmt.Errorf("error parsing CIDR for interface %s, error: %v", itf.Name, err)
}
if ipNet.Contains(ip) {
if utilnet.IsIPv6(ip) && !uniqueAddressList.Has(IPv6ZeroCIDR) {
uniqueAddressList.Insert(ip.String())
}
if !utilnet.IsIPv6(ip) && !uniqueAddressList.Has(IPv4ZeroCIDR) {
uniqueAddressList.Insert(ip.String())
}
}
}
}
}
return uniqueAddressList, nil
}
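// Illustrative behavior (a sketch matching utils_test.go; nw is the injected
// NetworkInterfacer): with cidrs ["10.20.30.0/24"] on a host whose eth0 holds
// 10.20.30.51/24 and whose eth1 holds 100.200.201.1/24, only the matching
// address is returned; an empty slice yields both zero CIDRs.
//
//	addrs, _ := GetNodeAddresses([]string{"10.20.30.0/24"}, nw) // {"10.20.30.51"}
//	addrs, _ = GetNodeAddresses([]string{}, nw)                 // {"0.0.0.0/0", "::/0"}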
// LogAndEmitIncorrectIPVersionEvent logs and emits an event for an incorrect IP version.
func LogAndEmitIncorrectIPVersionEvent(recorder record.EventRecorder, fieldName, fieldValue, svcNamespace, svcName string, svcUID types.UID) {
errMsg := fmt.Sprintf("%s in %s has incorrect IP version", fieldValue, fieldName)
glog.Errorf("%s (service %s/%s).", errMsg, svcNamespace, svcName)
if recorder != nil {
recorder.Eventf(
&v1.ObjectReference{
Kind: "Service",
Name: svcName,
Namespace: svcNamespace,
UID: svcUID,
}, v1.EventTypeWarning, "KubeProxyIncorrectIPVersion", errMsg)
}
}

View File

@ -1,329 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"net"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
api "k8s.io/kubernetes/pkg/apis/core"
fake "k8s.io/kubernetes/pkg/proxy/util/testing"
)
func TestShouldSkipService(t *testing.T) {
testCases := []struct {
service *api.Service
svcName types.NamespacedName
shouldSkip bool
}{
{
// Cluster IP is None
service: &api.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
Spec: api.ServiceSpec{
ClusterIP: api.ClusterIPNone,
},
},
svcName: types.NamespacedName{Namespace: "foo", Name: "bar"},
shouldSkip: true,
},
{
// Cluster IP is empty
service: &api.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
Spec: api.ServiceSpec{
ClusterIP: "",
},
},
svcName: types.NamespacedName{Namespace: "foo", Name: "bar"},
shouldSkip: true,
},
{
// ExternalName type service
service: &api.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
Spec: api.ServiceSpec{
ClusterIP: "1.2.3.4",
Type: api.ServiceTypeExternalName,
},
},
svcName: types.NamespacedName{Namespace: "foo", Name: "bar"},
shouldSkip: true,
},
{
// ClusterIP type service with ClusterIP set
service: &api.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
Spec: api.ServiceSpec{
ClusterIP: "1.2.3.4",
Type: api.ServiceTypeClusterIP,
},
},
svcName: types.NamespacedName{Namespace: "foo", Name: "bar"},
shouldSkip: false,
},
{
// NodePort type service with ClusterIP set
service: &api.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
Spec: api.ServiceSpec{
ClusterIP: "1.2.3.4",
Type: api.ServiceTypeNodePort,
},
},
svcName: types.NamespacedName{Namespace: "foo", Name: "bar"},
shouldSkip: false,
},
{
// LoadBalancer type service with ClusterIP set
service: &api.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
Spec: api.ServiceSpec{
ClusterIP: "1.2.3.4",
Type: api.ServiceTypeLoadBalancer,
},
},
svcName: types.NamespacedName{Namespace: "foo", Name: "bar"},
shouldSkip: false,
},
}
for i := range testCases {
skip := ShouldSkipService(testCases[i].svcName, testCases[i].service)
if skip != testCases[i].shouldSkip {
t.Errorf("case %d: expect %v, got %v", i, testCases[i].shouldSkip, skip)
}
}
}
type InterfaceAddrsPair struct {
itf net.Interface
addrs []net.Addr
}
func TestGetNodeAddresses(t *testing.T) {
testCases := []struct {
cidrs []string
nw *fake.FakeNetwork
itfAddrsPairs []InterfaceAddrsPair
expected sets.String
}{
{ // case 0
cidrs: []string{"10.20.30.0/24"},
nw: fake.NewFakeNetwork(),
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "10.20.30.51/24"}},
},
{
itf: net.Interface{Index: 2, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "100.200.201.1/24"}},
},
},
expected: sets.NewString("10.20.30.51"),
},
{ // case 1
cidrs: []string{"0.0.0.0/0"},
nw: fake.NewFakeNetwork(),
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "10.20.30.51/24"}},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "127.0.0.1/8"}},
},
},
expected: sets.NewString("0.0.0.0/0"),
},
{ // case 2
cidrs: []string{"2001:db8::/32", "::1/128"},
nw: fake.NewFakeNetwork(),
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "2001:db8::1/32"}},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "::1/128"}},
},
},
expected: sets.NewString("2001:db8::1", "::1"),
},
{ // case 3
cidrs: []string{"::/0"},
nw: fake.NewFakeNetwork(),
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "2001:db8::1/32"}},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "::1/128"}},
},
},
expected: sets.NewString("::/0"),
},
{ // case 4
cidrs: []string{"127.0.0.1/32"},
nw: fake.NewFakeNetwork(),
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "10.20.30.51/24"}},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "127.0.0.1/8"}},
},
},
expected: sets.NewString("127.0.0.1"),
},
{ // case 5
cidrs: []string{"127.0.0.0/8"},
nw: fake.NewFakeNetwork(),
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "127.0.1.1/8"}},
},
},
expected: sets.NewString("127.0.1.1"),
},
{ // case 6
cidrs: []string{"10.20.30.0/24", "100.200.201.0/24"},
nw: fake.NewFakeNetwork(),
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "10.20.30.51/24"}},
},
{
itf: net.Interface{Index: 2, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "100.200.201.1/24"}},
},
},
expected: sets.NewString("10.20.30.51", "100.200.201.1"),
},
{ // case 7
cidrs: []string{"10.20.30.0/24", "100.200.201.0/24"},
nw: fake.NewFakeNetwork(),
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "192.168.1.2/24"}},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "127.0.0.1/8"}},
},
},
expected: sets.NewString(),
},
{ // case 8
cidrs: []string{},
nw: fake.NewFakeNetwork(),
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "192.168.1.2/24"}},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "127.0.0.1/8"}},
},
},
expected: sets.NewString("0.0.0.0/0", "::/0"),
},
{ // case 9
cidrs: []string{},
nw: fake.NewFakeNetwork(),
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "2001:db8::1/32"}},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "::1/128"}},
},
},
expected: sets.NewString("0.0.0.0/0", "::/0"),
},
{ // case 10
cidrs: []string{"1.2.3.0/24", "0.0.0.0/0"},
nw: fake.NewFakeNetwork(),
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "1.2.3.4/30"}},
},
},
expected: sets.NewString("0.0.0.0/0"),
},
{ // case 11
cidrs: []string{"0.0.0.0/0", "1.2.3.0/24", "::1/128"},
nw: fake.NewFakeNetwork(),
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "1.2.3.4/30"}},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "::1/128"}},
},
},
expected: sets.NewString("0.0.0.0/0", "::1"),
},
{ // case 12
cidrs: []string{"::/0", "1.2.3.0/24", "::1/128"},
nw: fake.NewFakeNetwork(),
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "1.2.3.4/30"}},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{fake.AddrStruct{Val: "::1/128"}},
},
},
expected: sets.NewString("::/0", "1.2.3.4"),
},
}
for i := range testCases {
for _, pair := range testCases[i].itfAddrsPairs {
testCases[i].nw.AddInterfaceAddr(&pair.itf, pair.addrs)
}
addrList, err := GetNodeAddresses(testCases[i].cidrs, testCases[i].nw)
if err != nil {
t.Errorf("case [%d], unexpected error: %v", i, err)
}
if !addrList.Equal(testCases[i].expected) {
t.Errorf("case [%d], unexpected mismatch, expected: %v, got: %v", i, testCases[i].expected, addrList)
}
}
}

View File

@ -1,49 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"metrics.go",
] + select({
"@io_bazel_rules_go//go/platform:windows": [
"proxier.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/proxy/winkernel",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:windows": [
"//pkg/api/service:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/helper:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/healthcheck:go_default_library",
"//pkg/util/async:go_default_library",
"//vendor/github.com/Microsoft/hcsshim:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
"//conditions:default": [],
}),
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,3 +0,0 @@
reviewers:
- dineshgovindasamy
- madhanrm

View File

@ -1,50 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package winkernel
import (
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
)
const kubeProxySubsystem = "kubeproxy"
var (
SyncProxyRulesLatency = prometheus.NewHistogram(
prometheus.HistogramOpts{
Subsystem: kubeProxySubsystem,
Name: "sync_proxy_rules_latency_microseconds",
Help: "SyncProxyRules latency",
Buckets: prometheus.ExponentialBuckets(1000, 2, 15),
},
)
)
var registerMetricsOnce sync.Once
func RegisterMetrics() {
registerMetricsOnce.Do(func() {
prometheus.MustRegister(SyncProxyRulesLatency)
})
}
// sinceInMicroseconds gets the time since the specified start in microseconds.
func sinceInMicroseconds(start time.Time) float64 {
return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
}
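// A minimal sketch of the intended pattern (the sync step is hypothetical
// here; the proxier observes the histogram after rewriting its rules):
//
//	RegisterMetrics()
//	start := time.Now()
//	// ... sync proxy rules ...
//	SyncProxyRulesLatency.Observe(sinceInMicroseconds(start))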

File diff suppressed because it is too large

View File

@ -1,63 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"loadbalancer.go",
"proxier.go",
"proxysocket.go",
"roundrobin.go",
"types.go",
],
importpath = "k8s.io/kubernetes/pkg/proxy/winuserspace",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/helper:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/util/ipconfig:go_default_library",
"//pkg/util/netsh:go_default_library",
"//pkg/util/slice:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/miekg/dns:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"proxier_test.go",
"roundrobin_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/util/netsh/testing:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,33 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package winuserspace
import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
"net"
)
// LoadBalancer is an interface for distributing incoming requests to service endpoints.
type LoadBalancer interface {
// NextEndpoint returns the endpoint to handle a request for the given
// service-port and source address.
NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error)
NewService(service proxy.ServicePortName, sessionAffinityType api.ServiceAffinity, stickyMaxAgeMinutes int) error
DeleteService(service proxy.ServicePortName)
CleanupStaleStickySessions(service proxy.ServicePortName)
}
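// A minimal usage sketch with the round-robin implementation from
// roundrobin.go (svcPort and clientAddr are placeholders):
//
//	lb := NewLoadBalancerRR()
//	lb.NewService(svcPort, api.ServiceAffinityNone, 0)
//	ep, err := lb.NextEndpoint(svcPort, clientAddr, false)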

View File

@ -1,482 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package winuserspace
import (
"fmt"
"net"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/runtime"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/util/netsh"
)
const allAvailableInterfaces string = ""
type portal struct {
ip string
port int
isExternal bool
}
type serviceInfo struct {
isAliveAtomic int32 // Only access this with atomic ops
portal portal
protocol api.Protocol
socket proxySocket
timeout time.Duration
activeClients *clientCache
dnsClients *dnsClientCache
sessionAffinityType api.ServiceAffinity
}
func (info *serviceInfo) setAlive(b bool) {
var i int32
if b {
i = 1
}
atomic.StoreInt32(&info.isAliveAtomic, i)
}
func (info *serviceInfo) isAlive() bool {
return atomic.LoadInt32(&info.isAliveAtomic) != 0
}
func logTimeout(err error) bool {
if e, ok := err.(net.Error); ok {
if e.Timeout() {
glog.V(3).Infof("connection to endpoint closed due to inactivity")
return true
}
}
return false
}
// Proxier is a simple proxy for TCP connections between a localhost:lport
// and services that provide the actual implementations.
type Proxier struct {
loadBalancer LoadBalancer
mu sync.Mutex // protects serviceMap
serviceMap map[ServicePortPortalName]*serviceInfo
syncPeriod time.Duration
udpIdleTimeout time.Duration
portMapMutex sync.Mutex
portMap map[portMapKey]*portMapValue
numProxyLoops int32 // use atomic ops to access this; mostly for testing
netsh netsh.Interface
hostIP net.IP
}
// assert Proxier is a ProxyProvider
var _ proxy.ProxyProvider = &Proxier{}
// A key for the portMap. The ip has to be a string because slices can't be map
// keys.
type portMapKey struct {
ip string
port int
protocol api.Protocol
}
func (k *portMapKey) String() string {
return fmt.Sprintf("%s/%s", net.JoinHostPort(k.ip, strconv.Itoa(k.port)), k.protocol)
}
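// Illustrative output:
//
//	k := portMapKey{ip: "10.0.0.1", port: 80, protocol: api.ProtocolTCP}
//	k.String() // "10.0.0.1:80/TCP"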
// A value for the portMap
type portMapValue struct {
owner ServicePortPortalName
socket interface {
Close() error
}
}
var (
// ErrProxyOnLocalhost is returned by NewProxier if the user requests a proxier on
// the loopback address. May be checked for by callers of NewProxier to know whether
// the caller provided invalid input.
ErrProxyOnLocalhost = fmt.Errorf("cannot proxy on localhost")
)
// Used below.
var localhostIPv4 = net.ParseIP("127.0.0.1")
var localhostIPv6 = net.ParseIP("::1")
// NewProxier returns a new Proxier given a LoadBalancer and an address on
// which to listen. It is assumed that there is only a single Proxier active
// on a machine. An error will be returned if the proxier cannot be started
// due to an invalid ListenIP (loopback).
func NewProxier(loadBalancer LoadBalancer, listenIP net.IP, netsh netsh.Interface, pr utilnet.PortRange, syncPeriod, udpIdleTimeout time.Duration) (*Proxier, error) {
if listenIP.Equal(localhostIPv4) || listenIP.Equal(localhostIPv6) {
return nil, ErrProxyOnLocalhost
}
hostIP, err := utilnet.ChooseHostInterface()
if err != nil {
return nil, fmt.Errorf("failed to select a host interface: %v", err)
}
glog.V(2).Infof("Setting proxy IP to %v", hostIP)
return createProxier(loadBalancer, listenIP, netsh, hostIP, syncPeriod, udpIdleTimeout)
}
func createProxier(loadBalancer LoadBalancer, listenIP net.IP, netsh netsh.Interface, hostIP net.IP, syncPeriod, udpIdleTimeout time.Duration) (*Proxier, error) {
return &Proxier{
loadBalancer: loadBalancer,
serviceMap: make(map[ServicePortPortalName]*serviceInfo),
portMap: make(map[portMapKey]*portMapValue),
syncPeriod: syncPeriod,
udpIdleTimeout: udpIdleTimeout,
netsh: netsh,
hostIP: hostIP,
}, nil
}
// Sync is called to immediately synchronize the proxier state
func (proxier *Proxier) Sync() {
proxier.cleanupStaleStickySessions()
}
// SyncLoop runs periodic work. This is expected to run as a goroutine or as the main loop of the app. It does not return.
func (proxier *Proxier) SyncLoop() {
t := time.NewTicker(proxier.syncPeriod)
defer t.Stop()
for {
<-t.C
glog.V(6).Infof("Periodic sync")
proxier.Sync()
}
}
// cleanupStaleStickySessions cleans up any stale sticky session records in the hash map.
func (proxier *Proxier) cleanupStaleStickySessions() {
proxier.mu.Lock()
defer proxier.mu.Unlock()
servicePortNameMap := make(map[proxy.ServicePortName]bool)
for name := range proxier.serviceMap {
servicePortName := proxy.ServicePortName{
NamespacedName: types.NamespacedName{
Namespace: name.Namespace,
Name: name.Name,
},
Port: name.Port,
}
if !servicePortNameMap[servicePortName] {
// ensure CleanupStaleStickySessions only gets called once per ServicePortName
servicePortNameMap[servicePortName] = true
proxier.loadBalancer.CleanupStaleStickySessions(servicePortName)
}
}
}
// This assumes proxier.mu is not locked.
func (proxier *Proxier) stopProxy(service ServicePortPortalName, info *serviceInfo) error {
proxier.mu.Lock()
defer proxier.mu.Unlock()
return proxier.stopProxyInternal(service, info)
}
// This assumes proxier.mu is locked.
func (proxier *Proxier) stopProxyInternal(service ServicePortPortalName, info *serviceInfo) error {
delete(proxier.serviceMap, service)
info.setAlive(false)
err := info.socket.Close()
return err
}
func (proxier *Proxier) getServiceInfo(service ServicePortPortalName) (*serviceInfo, bool) {
proxier.mu.Lock()
defer proxier.mu.Unlock()
info, ok := proxier.serviceMap[service]
return info, ok
}
func (proxier *Proxier) setServiceInfo(service ServicePortPortalName, info *serviceInfo) {
proxier.mu.Lock()
defer proxier.mu.Unlock()
proxier.serviceMap[service] = info
}
// addServicePortPortal starts listening for a new service, returning the serviceInfo.
// The timeout only applies to UDP connections, for now.
func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPortalName, protocol api.Protocol, listenIP string, port int, timeout time.Duration) (*serviceInfo, error) {
var serviceIP net.IP
if listenIP != allAvailableInterfaces {
if serviceIP = net.ParseIP(listenIP); serviceIP == nil {
return nil, fmt.Errorf("could not parse ip '%q'", listenIP)
}
// add the IP address. Node port binds to all interfaces.
args := proxier.netshIpv4AddressAddArgs(serviceIP)
if existed, err := proxier.netsh.EnsureIPAddress(args, serviceIP); err != nil {
return nil, err
} else if !existed {
glog.V(3).Infof("Added ip address to fowarder interface for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol)
}
}
// add the listener, proxy
sock, err := newProxySocket(protocol, serviceIP, port)
if err != nil {
return nil, err
}
si := &serviceInfo{
isAliveAtomic: 1,
portal: portal{
ip: listenIP,
port: port,
isExternal: false,
},
protocol: protocol,
socket: sock,
timeout: timeout,
activeClients: newClientCache(),
dnsClients: newDNSClientCache(),
sessionAffinityType: api.ServiceAffinityNone, // default
}
proxier.setServiceInfo(servicePortPortalName, si)
glog.V(2).Infof("Proxying for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol)
go func(service ServicePortPortalName, proxier *Proxier) {
defer runtime.HandleCrash()
atomic.AddInt32(&proxier.numProxyLoops, 1)
sock.ProxyLoop(service, si, proxier)
atomic.AddInt32(&proxier.numProxyLoops, -1)
}(servicePortPortalName, proxier)
return si, nil
}
func (proxier *Proxier) closeServicePortPortal(servicePortPortalName ServicePortPortalName, info *serviceInfo) error {
// turn off the proxy
if err := proxier.stopProxy(servicePortPortalName, info); err != nil {
return err
}
// close the PortalProxy by deleting the service IP address
if info.portal.ip != allAvailableInterfaces {
serviceIP := net.ParseIP(info.portal.ip)
args := proxier.netshIpv4AddressDeleteArgs(serviceIP)
if err := proxier.netsh.DeleteIPAddress(args); err != nil {
return err
}
}
return nil
}
// getListenIPPortMap returns a map of all listen IP/port pairs for a service.
func getListenIPPortMap(service *api.Service, listenPort int, nodePort int) map[string]int {
listenIPPortMap := make(map[string]int)
listenIPPortMap[service.Spec.ClusterIP] = listenPort
for _, ip := range service.Spec.ExternalIPs {
listenIPPortMap[ip] = listenPort
}
for _, ingress := range service.Status.LoadBalancer.Ingress {
listenIPPortMap[ingress.IP] = listenPort
}
if nodePort != 0 {
listenIPPortMap[allAvailableInterfaces] = nodePort
}
return listenIPPortMap
}
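// Illustrative behavior: for a service with ClusterIP 10.0.0.1, external IP
// 1.2.3.4, port 80, and nodePort 30080, the result is
// {"10.0.0.1": 80, "1.2.3.4": 80, "": 30080}, where the empty key means the
// node port binds on all interfaces.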
func (proxier *Proxier) mergeService(service *api.Service) map[ServicePortPortalName]bool {
if service == nil {
return nil
}
svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if !helper.IsServiceIPSet(service) {
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
return nil
}
existingPortPortals := make(map[ServicePortPortalName]bool)
for i := range service.Spec.Ports {
servicePort := &service.Spec.Ports[i]
// build the map of all the listen IP/port pairs to use for service port portals
listenIPPortMap := getListenIPPortMap(service, int(servicePort.Port), int(servicePort.NodePort))
protocol := servicePort.Protocol
for listenIP, listenPort := range listenIPPortMap {
servicePortPortalName := ServicePortPortalName{
NamespacedName: svcName,
Port: servicePort.Name,
PortalIPName: listenIP,
}
existingPortPortals[servicePortPortalName] = true
info, exists := proxier.getServiceInfo(servicePortPortalName)
if exists && sameConfig(info, service, protocol, listenPort) {
// Nothing changed.
continue
}
if exists {
glog.V(4).Infof("Something changed for service %q: stopping it", servicePortPortalName)
if err := proxier.closeServicePortPortal(servicePortPortalName, info); err != nil {
glog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err)
}
}
glog.V(1).Infof("Adding new service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(listenPort)), protocol)
info, err := proxier.addServicePortPortal(servicePortPortalName, protocol, listenIP, listenPort, proxier.udpIdleTimeout)
if err != nil {
glog.Errorf("Failed to start proxy for %q: %v", servicePortPortalName, err)
continue
}
info.sessionAffinityType = service.Spec.SessionAffinity
glog.V(10).Infof("info: %#v", info)
}
if len(listenIPPortMap) > 0 {
// only one loadbalancer per service port portal
servicePortName := proxy.ServicePortName{
NamespacedName: types.NamespacedName{
Namespace: service.Namespace,
Name: service.Name,
},
Port: servicePort.Name,
}
timeoutSeconds := 0
if service.Spec.SessionAffinity == api.ServiceAffinityClientIP {
timeoutSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
}
proxier.loadBalancer.NewService(servicePortName, service.Spec.SessionAffinity, timeoutSeconds)
}
}
return existingPortPortals
}
func (proxier *Proxier) unmergeService(service *api.Service, existingPortPortals map[ServicePortPortalName]bool) {
if service == nil {
return
}
svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if !helper.IsServiceIPSet(service) {
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
return
}
servicePortNameMap := make(map[proxy.ServicePortName]bool)
for name := range existingPortPortals {
servicePortName := proxy.ServicePortName{
NamespacedName: types.NamespacedName{
Namespace: name.Namespace,
Name: name.Name,
},
Port: name.Port,
}
servicePortNameMap[servicePortName] = true
}
for i := range service.Spec.Ports {
servicePort := &service.Spec.Ports[i]
serviceName := proxy.ServicePortName{NamespacedName: svcName, Port: servicePort.Name}
// build the map of all the listen IP/port pairs to use for service port portals
listenIPPortMap := getListenIPPortMap(service, int(servicePort.Port), int(servicePort.NodePort))
for listenIP := range listenIPPortMap {
servicePortPortalName := ServicePortPortalName{
NamespacedName: svcName,
Port: servicePort.Name,
PortalIPName: listenIP,
}
if existingPortPortals[servicePortPortalName] {
continue
}
glog.V(1).Infof("Stopping service %q", servicePortPortalName)
info, exists := proxier.getServiceInfo(servicePortPortalName)
if !exists {
glog.Errorf("Service %q is being removed but doesn't exist", servicePortPortalName)
continue
}
if err := proxier.closeServicePortPortal(servicePortPortalName, info); err != nil {
glog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err)
}
}
// Only delete the load balancer if no listen IPs remain active for this name/port.
if !servicePortNameMap[serviceName] {
proxier.loadBalancer.DeleteService(serviceName)
}
}
}
func (proxier *Proxier) OnServiceAdd(service *api.Service) {
_ = proxier.mergeService(service)
}
func (proxier *Proxier) OnServiceUpdate(oldService, service *api.Service) {
existingPortPortals := proxier.mergeService(service)
proxier.unmergeService(oldService, existingPortPortals)
}
func (proxier *Proxier) OnServiceDelete(service *api.Service) {
proxier.unmergeService(service, map[ServicePortPortalName]bool{})
}
func (proxier *Proxier) OnServiceSynced() {
}
func sameConfig(info *serviceInfo, service *api.Service, protocol api.Protocol, listenPort int) bool {
return info.protocol == protocol && info.portal.port == listenPort && info.sessionAffinityType == service.Spec.SessionAffinity
}
func isTooManyFDsError(err error) bool {
return strings.Contains(err.Error(), "too many open files")
}
func isClosedError(err error) bool {
// A brief discussion about handling closed error here:
// https://code.google.com/p/go/issues/detail?id=4373#c14
// TODO: maybe create a stoppable TCP listener that returns a StoppedError
return strings.HasSuffix(err.Error(), "use of closed network connection")
}
func (proxier *Proxier) netshIpv4AddressAddArgs(destIP net.IP) []string {
intName := proxier.netsh.GetInterfaceToAddIP()
args := []string{
"interface", "ipv4", "add", "address",
"name=" + intName,
"address=" + destIP.String(),
}
return args
}
func (proxier *Proxier) netshIpv4AddressDeleteArgs(destIP net.IP) []string {
intName := proxier.netsh.GetInterfaceToAddIP()
args := []string{
"interface", "ipv4", "delete", "address",
"name=" + intName,
"address=" + destIP.String(),
}
return args
}

View File

@ -1,953 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package winuserspace
import (
"fmt"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"net/url"
"os"
"strconv"
"sync/atomic"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
netshtest "k8s.io/kubernetes/pkg/util/netsh/testing"
)
const (
udpIdleTimeoutForTest = 250 * time.Millisecond
)
func joinHostPort(host string, port int) string {
return net.JoinHostPort(host, fmt.Sprintf("%d", port))
}
func waitForClosedPortTCP(p *Proxier, proxyPort int) error {
for i := 0; i < 50; i++ {
conn, err := net.Dial("tcp", joinHostPort("", proxyPort))
if err != nil {
return nil
}
conn.Close()
time.Sleep(1 * time.Millisecond)
}
return fmt.Errorf("port %d still open", proxyPort)
}
func waitForClosedPortUDP(p *Proxier, proxyPort int) error {
for i := 0; i < 50; i++ {
conn, err := net.Dial("udp", joinHostPort("", proxyPort))
if err != nil {
return nil
}
conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
// To detect a closed UDP port, write and then read.
_, err = conn.Write([]byte("x"))
if err != nil {
if e, ok := err.(net.Error); ok && !e.Timeout() {
return nil
}
}
var buf [4]byte
_, err = conn.Read(buf[0:])
if err != nil {
if e, ok := err.(net.Error); ok && !e.Timeout() {
return nil
}
}
conn.Close()
time.Sleep(1 * time.Millisecond)
}
return fmt.Errorf("port %d still open", proxyPort)
}
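// Why write-then-read: UDP has no handshake, so a successful Dial alone cannot
// distinguish an open port from a closed one. Writing a datagram to a closed
// port typically triggers an ICMP "port unreachable", which then surfaces as a
// non-timeout error on the next Read. The probe above condensed into a single
// hypothetical helper:
func udpPortLooksClosed(port int) bool {
conn, err := net.Dial("udp", joinHostPort("", port))
if err != nil {
return true
}
defer conn.Close()
conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
if _, err := conn.Write([]byte("x")); err != nil {
if e, ok := err.(net.Error); ok && !e.Timeout() {
return true
}
}
var buf [4]byte
if _, err := conn.Read(buf[:]); err != nil {
if e, ok := err.(net.Error); ok && !e.Timeout() {
return true
}
}
return false
}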
// udpEchoServer is a simple echo server in UDP, intended for testing the proxy.
type udpEchoServer struct {
net.PacketConn
}
func newUDPEchoServer() (*udpEchoServer, error) {
packetconn, err := net.ListenPacket("udp", ":0")
if err != nil {
return nil, err
}
return &udpEchoServer{packetconn}, nil
}
func (r *udpEchoServer) Loop() {
var buffer [4096]byte
for {
n, cliAddr, err := r.ReadFrom(buffer[0:])
if err != nil {
fmt.Printf("ReadFrom failed: %v\n", err)
continue
}
r.WriteTo(buffer[0:n], cliAddr)
}
}
var tcpServerPort int32
var udpServerPort int32
func TestMain(m *testing.M) {
// Don't handle panics
runtime.ReallyCrash = true
// TCP setup.
tcp := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(r.URL.Path[1:]))
}))
defer tcp.Close()
u, err := url.Parse(tcp.URL)
if err != nil {
panic(fmt.Sprintf("failed to parse: %v", err))
}
_, port, err := net.SplitHostPort(u.Host)
if err != nil {
panic(fmt.Sprintf("failed to parse: %v", err))
}
tcpServerPortValue, err := strconv.Atoi(port)
if err != nil {
panic(fmt.Sprintf("failed to atoi(%s): %v", port, err))
}
tcpServerPort = int32(tcpServerPortValue)
// UDP setup.
udp, err := newUDPEchoServer()
if err != nil {
panic(fmt.Sprintf("failed to make a UDP server: %v", err))
}
_, port, err = net.SplitHostPort(udp.LocalAddr().String())
if err != nil {
panic(fmt.Sprintf("failed to parse: %v", err))
}
udpServerPortValue, err := strconv.Atoi(port)
if err != nil {
panic(fmt.Sprintf("failed to atoi(%s): %v", port, err))
}
udpServerPort = int32(udpServerPortValue)
go udp.Loop()
ret := m.Run()
// it should be safe to call Close() multiple times.
tcp.Close()
os.Exit(ret)
}
func testEchoTCP(t *testing.T, address string, port int) {
path := "aaaaa"
res, err := http.Get("http://" + address + ":" + fmt.Sprintf("%d", port) + "/" + path)
if err != nil {
t.Fatalf("error connecting to server: %v", err)
}
defer res.Body.Close()
data, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Errorf("error reading data: %v %v", err, string(data))
}
if string(data) != path {
t.Errorf("expected: %s, got %s", path, string(data))
}
}
func testEchoUDP(t *testing.T, address string, port int) {
data := "abc123"
conn, err := net.Dial("udp", joinHostPort(address, port))
if err != nil {
t.Fatalf("error connecting to server: %v", err)
}
if _, err := conn.Write([]byte(data)); err != nil {
t.Fatalf("error sending to server: %v", err)
}
var resp [1024]byte
n, err := conn.Read(resp[0:])
if err != nil {
t.Errorf("error receiving data: %v", err)
}
if string(resp[0:n]) != data {
t.Errorf("expected: %s, got %s", data, string(resp[0:n]))
}
}
func waitForNumProxyLoops(t *testing.T, p *Proxier, want int32) {
var got int32
for i := 0; i < 600; i++ {
got = atomic.LoadInt32(&p.numProxyLoops)
if got == want {
return
}
time.Sleep(100 * time.Millisecond)
}
t.Errorf("expected %d ProxyLoops running, got %d", want, got)
}
func waitForNumProxyClients(t *testing.T, s *serviceInfo, want int, timeout time.Duration) {
var got int
now := time.Now()
deadline := now.Add(timeout)
for time.Now().Before(deadline) {
s.activeClients.mu.Lock()
got = len(s.activeClients.clients)
s.activeClients.mu.Unlock()
if got == want {
return
}
time.Sleep(500 * time.Millisecond)
}
t.Errorf("expected %d ProxyClients live, got %d", want, got)
}
func getPortNum(t *testing.T, addr string) int {
_, portStr, err := net.SplitHostPort(addr)
if err != nil {
t.Errorf("error getting port from %s", addr)
return 0
}
portNum, err := strconv.Atoi(portStr)
if err != nil {
t.Errorf("error getting port from %s", addr)
return 0
}
return portNum
}
func TestTCPProxy(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
waitForNumProxyLoops(t, p, 1)
}
func TestUDPProxy(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
waitForNumProxyLoops(t, p, 1)
}
func TestUDPProxyTimeout(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
waitForNumProxyLoops(t, p, 1)
testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
// When connecting to a UDP service endpoint, there should be a Conn for the proxy.
waitForNumProxyClients(t, svcInfo, 1, time.Second)
// If the conn sees no activity within serviceInfo.timeout of the last Read/Write, it should be closed by the timeout.
waitForNumProxyClients(t, svcInfo, 0, 2*time.Second)
}
func TestMultiPortProxy(t *testing.T) {
lb := NewLoadBalancerRR()
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-p"}, Port: "p"}
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-q"}, Port: "q"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Protocol: "TCP", Port: tcpServerPort}},
}},
})
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceQ.Name, Namespace: serviceQ.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "q", Protocol: "UDP", Port: udpServerPort}},
}},
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
servicePortPortalNameP := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceP.Namespace, Name: serviceP.Name}, Port: serviceP.Port, PortalIPName: listenIP}
svcInfoP, err := p.addServicePortPortal(servicePortPortalNameP, "TCP", listenIP, 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfoP.socket.Addr().String()))
waitForNumProxyLoops(t, p, 1)
servicePortPortalNameQ := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceQ.Namespace, Name: serviceQ.Name}, Port: serviceQ.Port, PortalIPName: listenIP}
svcInfoQ, err := p.addServicePortPortal(servicePortPortalNameQ, "UDP", listenIP, 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfoQ.socket.Addr().String()))
waitForNumProxyLoops(t, p, 2)
}
func TestMultiPortOnServiceAdd(t *testing.T) {
lb := NewLoadBalancerRR()
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "q"}
serviceX := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "x"}
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
p.OnServiceAdd(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Spec: api.ServiceSpec{ClusterIP: "0.0.0.0", Ports: []api.ServicePort{{
Name: "p",
Port: 0,
Protocol: "TCP",
}, {
Name: "q",
Port: 0,
Protocol: "UDP",
}}},
})
waitForNumProxyLoops(t, p, 2)
servicePortPortalNameP := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceP.Namespace, Name: serviceP.Name}, Port: serviceP.Port, PortalIPName: listenIP}
svcInfo, exists := p.getServiceInfo(servicePortPortalNameP)
if !exists {
t.Fatalf("can't find serviceInfo for %s", servicePortPortalNameP)
}
if svcInfo.portal.ip != "0.0.0.0" || svcInfo.portal.port != 0 || svcInfo.protocol != "TCP" {
t.Errorf("unexpected serviceInfo for %s: %#v", serviceP, svcInfo)
}
servicePortPortalNameQ := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceQ.Namespace, Name: serviceQ.Name}, Port: serviceQ.Port, PortalIPName: listenIP}
svcInfo, exists = p.getServiceInfo(servicePortPortalNameQ)
if !exists {
t.Fatalf("can't find serviceInfo for %s", servicePortPortalNameQ)
}
if svcInfo.portal.ip != "0.0.0.0" || svcInfo.portal.port != 0 || svcInfo.protocol != "UDP" {
t.Errorf("unexpected serviceInfo for %s: %#v", serviceQ, svcInfo)
}
servicePortPortalNameX := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceX.Namespace, Name: serviceX.Name}, Port: serviceX.Port, PortalIPName: listenIP}
svcInfo, exists = p.getServiceInfo(servicePortPortalNameX)
if exists {
t.Fatalf("found unwanted serviceInfo for %s: %#v", serviceX, svcInfo)
}
}
// Helper: Stops the proxy for the named service.
func stopProxyByName(proxier *Proxier, service ServicePortPortalName) error {
info, found := proxier.getServiceInfo(service)
if !found {
return fmt.Errorf("unknown service: %s", service)
}
return proxier.stopProxy(service, info)
}
func TestTCPProxyStop(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
if !svcInfo.isAlive() {
t.Fatalf("wrong value for isAlive(): expected true")
}
conn, err := net.Dial("tcp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String())))
if err != nil {
t.Fatalf("error connecting to proxy: %v", err)
}
conn.Close()
waitForNumProxyLoops(t, p, 1)
stopProxyByName(p, servicePortPortalName)
if svcInfo.isAlive() {
t.Fatalf("wrong value for isAlive(): expected false")
}
// Wait for the port to really close.
if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
}
func TestUDPProxyStop(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
conn, err := net.Dial("udp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String())))
if err != nil {
t.Fatalf("error connecting to proxy: %v", err)
}
conn.Close()
waitForNumProxyLoops(t, p, 1)
stopProxyByName(p, servicePortPortalName)
// Wait for the port to really close.
if err := waitForClosedPortUDP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
}
func TestTCPProxyUpdateDelete(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
fmt.Println("here0")
conn, err := net.Dial("tcp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String())))
if err != nil {
t.Fatalf("error connecting to proxy: %v", err)
}
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Name: "p",
Port: int32(getPortNum(t, svcInfo.socket.Addr().String())),
Protocol: "TCP",
}}},
})
if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
}
func TestUDPProxyUpdateDelete(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
conn, err := net.Dial("udp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String())))
if err != nil {
t.Fatalf("error connecting to proxy: %v", err)
}
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Name: "p",
Port: int32(getPortNum(t, svcInfo.socket.Addr().String())),
Protocol: "UDP",
}}},
})
if err := waitForClosedPortUDP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
}
func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
endpoint := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
}
lb.OnEndpointsAdd(endpoint)
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
conn, err := net.Dial("tcp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String())))
if err != nil {
t.Fatalf("error connecting to proxy: %v", err)
}
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Name: "p",
Port: int32(getPortNum(t, svcInfo.socket.Addr().String())),
Protocol: "TCP",
}}},
})
if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
// need to add the endpoint here because it got cleaned up during the service delete
lb.OnEndpointsAdd(endpoint)
p.OnServiceAdd(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Name: "p",
Port: int32(getPortNum(t, svcInfo.socket.Addr().String())),
Protocol: "TCP",
}}},
})
svcInfo, exists := p.getServiceInfo(servicePortPortalName)
if !exists {
t.Fatalf("can't find serviceInfo for %s", servicePortPortalName)
}
testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
waitForNumProxyLoops(t, p, 1)
}
func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
endpoint := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
}
lb.OnEndpointsAdd(endpoint)
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
conn, err := net.Dial("udp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String())))
if err != nil {
t.Fatalf("error connecting to proxy: %v", err)
}
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Name: "p",
Port: int32(getPortNum(t, svcInfo.socket.Addr().String())),
Protocol: "UDP",
}}},
})
if err := waitForClosedPortUDP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
// need to add the endpoint here because it got cleaned up during the service delete
lb.OnEndpointsAdd(endpoint)
p.OnServiceAdd(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Name: "p",
Port: int32(getPortNum(t, svcInfo.socket.Addr().String())),
Protocol: "UDP",
}}},
})
svcInfo, exists := p.getServiceInfo(servicePortPortalName)
if !exists {
t.Fatalf("can't find serviceInfo for %s", servicePortPortalName)
}
testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
waitForNumProxyLoops(t, p, 1)
}
func TestTCPProxyUpdatePort(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
waitForNumProxyLoops(t, p, 1)
p.OnServiceAdd(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Name: "p",
Port: 0,
Protocol: "TCP",
}}},
})
// Wait for the socket to actually get free.
if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
t.Fatal(err)
}
svcInfo, exists := p.getServiceInfo(servicePortPortalName)
if !exists {
t.Fatalf("can't find serviceInfo for %s", servicePortPortalName)
}
testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
// This is a bit async, but this should be sufficient.
time.Sleep(500 * time.Millisecond)
waitForNumProxyLoops(t, p, 1)
}
func TestUDPProxyUpdatePort(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
waitForNumProxyLoops(t, p, 1)
p.OnServiceAdd(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Name: "p",
Port: 0,
Protocol: "UDP",
}}},
})
// Wait for the socket to actually get free.
if err := waitForClosedPortUDP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
t.Fatal(err)
}
svcInfo, exists := p.getServiceInfo(servicePortPortalName)
if !exists {
t.Fatalf("can't find serviceInfo for %s", servicePortPortalName)
}
testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
waitForNumProxyLoops(t, p, 1)
}
func TestProxyUpdatePublicIPs(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
waitForNumProxyLoops(t, p, 1)
p.OnServiceAdd(&api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.portal.port),
Protocol: "TCP",
}},
ClusterIP: svcInfo.portal.ip,
ExternalIPs: []string{"0.0.0.0"},
},
})
// Wait for the socket to actually get free.
if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
t.Fatal(err)
}
svcInfo, exists := p.getServiceInfo(servicePortPortalName)
if !exists {
t.Fatalf("can't find serviceInfo for %s", servicePortPortalName)
}
testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
// This is a bit async, but this should be sufficient.
time.Sleep(500 * time.Millisecond)
waitForNumProxyLoops(t, p, 1)
}
func TestProxyUpdatePortal(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
endpoint := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
}
lb.OnEndpointsAdd(endpoint)
listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil {
t.Fatal(err)
}
waitForNumProxyLoops(t, p, 0)
servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
if err != nil {
t.Fatalf("error adding new service: %#v", err)
}
testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
waitForNumProxyLoops(t, p, 1)
svcv0 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.portal.port),
Protocol: "TCP",
}}},
}
svcv1 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "", Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.portal.port),
Protocol: "TCP",
}}},
}
p.OnServiceUpdate(svcv0, svcv1)
_, exists := p.getServiceInfo(servicePortPortalName)
if exists {
t.Fatalf("service with empty ClusterIP should not be included in the proxy")
}
svcv2 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "None", Ports: []api.ServicePort{{
Name: "p",
Port: int32(getPortNum(t, svcInfo.socket.Addr().String())),
Protocol: "TCP",
}}},
}
p.OnServiceUpdate(svcv1, svcv2)
_, exists = p.getServiceInfo(servicePortPortalName)
if exists {
t.Fatalf("service with 'None' as ClusterIP should not be included in the proxy")
}
svcv3 := &api.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Name: "p",
Port: int32(svcInfo.portal.port),
Protocol: "TCP",
}}},
}
p.OnServiceUpdate(svcv2, svcv3)
lb.OnEndpointsAdd(endpoint)
svcInfo, exists = p.getServiceInfo(servicePortPortalName)
if !exists {
t.Fatalf("service with ClusterIP set not found in the proxy")
}
testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
waitForNumProxyLoops(t, p, 1)
}
// TODO(justinsb): Add test for nodePort conflict detection, once we have nodePort wired in

View File

@ -1,632 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package winuserspace
import (
"fmt"
"io"
"net"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/golang/glog"
"github.com/miekg/dns"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/util/ipconfig"
"k8s.io/utils/exec"
)
const (
// Kubernetes DNS suffix search list
// TODO: Get DNS suffix search list from docker containers.
// The --dns-search option doesn't work on Windows containers and was only
// recently fixed in docker.
// Kubernetes cluster domain
clusterDomain = "cluster.local"
// Kubernetes service domain
serviceDomain = "svc." + clusterDomain
// Kubernetes default namespace domain
namespaceServiceDomain = "default." + serviceDomain
// Kubernetes DNS service port name
dnsPortName = "dns"
// DNS TYPE value A (a host address)
dnsTypeA uint16 = 0x01
// DNS TYPE value AAAA (a host IPv6 address)
dnsTypeAAAA uint16 = 0x1c
// DNS CLASS value IN (the Internet)
dnsClassInternet uint16 = 0x01
)
// Abstraction over TCP/UDP sockets which are proxied.
type proxySocket interface {
// Addr gets the net.Addr for a proxySocket.
Addr() net.Addr
// Close stops the proxySocket from accepting incoming connections.
// Each implementation should comment on the impact of calling Close
// while sessions are active.
Close() error
// ProxyLoop proxies incoming connections for the specified service to the service endpoints.
ProxyLoop(service ServicePortPortalName, info *serviceInfo, proxier *Proxier)
// ListenPort returns the host port that the proxySocket is listening on
ListenPort() int
}
func newProxySocket(protocol api.Protocol, ip net.IP, port int) (proxySocket, error) {
host := ""
if ip != nil {
host = ip.String()
}
switch strings.ToUpper(string(protocol)) {
case "TCP":
listener, err := net.Listen("tcp", net.JoinHostPort(host, strconv.Itoa(port)))
if err != nil {
return nil, err
}
return &tcpProxySocket{Listener: listener, port: port}, nil
case "UDP":
addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(host, strconv.Itoa(port)))
if err != nil {
return nil, err
}
conn, err := net.ListenUDP("udp", addr)
if err != nil {
return nil, err
}
return &udpProxySocket{UDPConn: conn, port: port}, nil
}
return nil, fmt.Errorf("unknown protocol %q", protocol)
}
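// Usage sketch (hypothetical, not in the original file): passing port 0 asks
// the kernel for an ephemeral port; the concrete listen address is then
// available from Addr().
func newProxySocketSketch() (proxySocket, error) {
sock, err := newProxySocket(api.ProtocolTCP, net.ParseIP("127.0.0.1"), 0)
if err != nil {
return nil, err
}
// sock.Addr() now reports something like 127.0.0.1:49152.
return sock, nil
}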
// Dial timeouts to try, in order, when connecting to a backend; each retry waits longer.
var endpointDialTimeout = []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 2 * time.Second}
// tcpProxySocket implements proxySocket. Close() is implemented by net.Listener. When Close() is called,
// no new connections are allowed but existing connections are left untouched.
type tcpProxySocket struct {
net.Listener
port int
}
func (tcp *tcpProxySocket) ListenPort() int {
return tcp.port
}
func tryConnect(service ServicePortPortalName, srcAddr net.Addr, protocol string, proxier *Proxier) (out net.Conn, err error) {
sessionAffinityReset := false
for _, dialTimeout := range endpointDialTimeout {
servicePortName := proxy.ServicePortName{
NamespacedName: types.NamespacedName{
Namespace: service.Namespace,
Name: service.Name,
},
Port: service.Port,
}
endpoint, err := proxier.loadBalancer.NextEndpoint(servicePortName, srcAddr, sessionAffinityReset)
if err != nil {
glog.Errorf("Couldn't find an endpoint for %s: %v", service, err)
return nil, err
}
glog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint)
// TODO: This could spin up a new goroutine to make the outbound connection,
// and keep accepting inbound traffic.
outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout)
if err != nil {
if isTooManyFDsError(err) {
panic("Dial failed: " + err.Error())
}
glog.Errorf("Dial failed: %v", err)
sessionAffinityReset = true
continue
}
return outConn, nil
}
return nil, fmt.Errorf("failed to connect to an endpoint.")
}
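// tryConnect retries with the progressively longer timeouts above and, after a
// failed dial, flips sessionAffinityReset so the balancer can offer a different
// endpoint. The same shape reduced to its core (hypothetical helper):
func dialWithBackoff(network, addr string) (net.Conn, error) {
var lastErr error
for _, timeout := range endpointDialTimeout {
conn, err := net.DialTimeout(network, addr, timeout)
if err == nil {
return conn, nil
}
lastErr = err
}
return nil, lastErr
}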
func (tcp *tcpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serviceInfo, proxier *Proxier) {
for {
if !myInfo.isAlive() {
// The service port was closed or replaced.
return
}
// Block until a connection is made.
inConn, err := tcp.Accept()
if err != nil {
if isTooManyFDsError(err) {
panic("Accept failed: " + err.Error())
}
if isClosedError(err) {
return
}
if !myInfo.isAlive() {
// The service port was just closed, so the accept failure is expected.
return
}
glog.Errorf("Accept failed: %v", err)
continue
}
glog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
outConn, err := tryConnect(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", proxier)
if err != nil {
glog.Errorf("Failed to connect to balancer: %v", err)
inConn.Close()
continue
}
// Spin up an async copy loop.
go proxyTCP(inConn.(*net.TCPConn), outConn.(*net.TCPConn))
}
}
// proxyTCP proxies data bi-directionally between in and out.
func proxyTCP(in, out *net.TCPConn) {
var wg sync.WaitGroup
wg.Add(2)
glog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
go copyBytes("from backend", in, out, &wg)
go copyBytes("to backend", out, in, &wg)
wg.Wait()
}
func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) {
defer wg.Done()
glog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
n, err := io.Copy(dest, src)
if err != nil {
if !isClosedError(err) {
glog.Errorf("I/O error: %v", err)
}
}
glog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
dest.Close()
src.Close()
}
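// Design note: each copyBytes call closes *both* ends once its direction hits
// EOF or an error; that unblocks the sibling io.Copy, so proxyTCP's WaitGroup
// always drains. A generic variant for any net.Conn pair (hypothetical, not in
// the original file):
func proxyDuplex(a, b net.Conn) {
var wg sync.WaitGroup
wg.Add(2)
pipe := func(dst, src net.Conn) {
defer wg.Done()
io.Copy(dst, src) // the peer closing is the normal exit path
dst.Close()
src.Close()
}
go pipe(a, b)
go pipe(b, a)
wg.Wait()
}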
// udpProxySocket implements proxySocket. Close() is implemented by net.UDPConn. When Close() is called,
// no new connections are allowed and existing connections are broken.
// TODO: We could lame-duck this ourselves, if it becomes important.
type udpProxySocket struct {
*net.UDPConn
port int
}
func (udp *udpProxySocket) ListenPort() int {
return udp.port
}
func (udp *udpProxySocket) Addr() net.Addr {
return udp.LocalAddr()
}
// Holds all the known UDP clients that have not timed out.
type clientCache struct {
mu sync.Mutex
clients map[string]net.Conn // addr string -> connection
}
func newClientCache() *clientCache {
return &clientCache{clients: map[string]net.Conn{}}
}
// DNS query client classified by address and QTYPE
type dnsClientQuery struct {
clientAddress string
dnsQType uint16
}
// Holds per-client DNS query state: the current index into the DNS suffix
// search list and the original DNS message for the same client and QTYPE.
type dnsClientCache struct {
mu sync.Mutex
clients map[dnsClientQuery]*dnsQueryState
}
type dnsQueryState struct {
searchIndex int32
msg *dns.Msg
}
func newDNSClientCache() *dnsClientCache {
return &dnsClientCache{clients: map[dnsClientQuery]*dnsQueryState{}}
}
func packetRequiresDNSSuffix(dnsType, dnsClass uint16) bool {
return (dnsType == dnsTypeA || dnsType == dnsTypeAAAA) && dnsClass == dnsClassInternet
}
func isDNSService(portName string) bool {
return portName == dnsPortName
}
func appendDNSSuffix(msg *dns.Msg, buffer []byte, length int, dnsSuffix string) (int, error) {
if msg == nil || len(msg.Question) == 0 {
return length, fmt.Errorf("DNS message parameter is invalid")
}
// Save the original name since it will be reused for the next iteration.
origName := msg.Question[0].Name
if dnsSuffix != "" {
msg.Question[0].Name += dnsSuffix + "."
}
mbuf, err := msg.PackBuffer(buffer)
msg.Question[0].Name = origName
if err != nil {
glog.Warningf("Unable to pack DNS packet. Error is: %v", err)
return length, err
}
if &buffer[0] != &mbuf[0] {
return length, fmt.Errorf("Buffer is too small in packing DNS packet")
}
return len(mbuf), nil
}
func recoverDNSQuestion(origName string, msg *dns.Msg, buffer []byte, length int) (int, error) {
if msg == nil || len(msg.Question) == 0 {
return length, fmt.Errorf("DNS message parameter is invalid")
}
if origName == msg.Question[0].Name {
return length, nil
}
msg.Question[0].Name = origName
if len(msg.Answer) > 0 {
msg.Answer[0].Header().Name = origName
}
mbuf, err := msg.PackBuffer(buffer)
if err != nil {
glog.Warningf("Unable to pack DNS packet. Error is: %v", err)
return length, err
}
if &buffer[0] != &mbuf[0] {
return length, fmt.Errorf("Buffer is too small in packing DNS packet")
}
return len(mbuf), nil
}
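// Worked round trip (hypothetical values): a client query for "kubernetes."
// gets the search suffix "default.svc.cluster.local" appended before it is
// forwarded upstream, and the upstream response is rewritten back to the name
// the client originally asked for.
func dnsSuffixRoundTripSketch() error {
msg := &dns.Msg{}
msg.SetQuestion("kubernetes.", dns.TypeA)
var buffer [4096]byte
packed, err := msg.PackBuffer(buffer[:])
if err != nil {
return err
}
// Outbound: the wire name becomes "kubernetes.default.svc.cluster.local.".
length, err := appendDNSSuffix(msg, buffer[:], len(packed), "default.svc.cluster.local")
if err != nil {
return err
}
// Inbound: unpack the (suffixed) response and restore the original question.
resp := &dns.Msg{}
if err := resp.Unpack(buffer[:length]); err != nil {
return err
}
_, err = recoverDNSQuestion("kubernetes.", resp, buffer[:], length)
return err
}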
func processUnpackedDNSQueryPacket(
dnsClients *dnsClientCache,
msg *dns.Msg,
host string,
dnsQType uint16,
buffer []byte,
length int,
dnsSearch []string) int {
if len(dnsSearch) == 0 {
glog.V(1).Infof("DNS search list is not initialized or is empty.")
return length
}
// TODO: handle concurrent queries from a client
dnsClients.mu.Lock()
state, found := dnsClients.clients[dnsClientQuery{host, dnsQType}]
if !found {
state = &dnsQueryState{0, msg}
dnsClients.clients[dnsClientQuery{host, dnsQType}] = state
}
dnsClients.mu.Unlock()
index := atomic.SwapInt32(&state.searchIndex, state.searchIndex+1)
// Also update the message ID in case the client retried due to a previous query timeout.
state.msg.MsgHdr.Id = msg.MsgHdr.Id
if index < 0 || index >= int32(len(dnsSearch)) {
glog.V(1).Infof("Search index %d is out of range.", index)
return length
}
length, err := appendDNSSuffix(msg, buffer, length, dnsSearch[index])
if err != nil {
glog.Errorf("Append DNS suffix failed: %v", err)
}
return length
}
func processUnpackedDNSResponsePacket(
svrConn net.Conn,
dnsClients *dnsClientCache,
msg *dns.Msg,
rcode int,
host string,
dnsQType uint16,
buffer []byte,
length int,
dnsSearch []string) (bool, int) {
var drop bool
var err error
if len(dnsSearch) == 0 {
glog.V(1).Infof("DNS search list is not initialized or is empty.")
return drop, length
}
dnsClients.mu.Lock()
state, found := dnsClients.clients[dnsClientQuery{host, dnsQType}]
dnsClients.mu.Unlock()
if found {
index := atomic.SwapInt32(&state.searchIndex, state.searchIndex+1)
if rcode != 0 && index >= 0 && index < int32(len(dnsSearch)) {
// If the response indicates failure and iteration through the search list has
// not reached the end, retry on behalf of the client using the original query message.
drop = true
length, err = appendDNSSuffix(state.msg, buffer, length, dnsSearch[index])
if err != nil {
glog.Errorf("Append DNS suffix failed: %v", err)
}
_, err = svrConn.Write(buffer[0:length])
if err != nil {
if !logTimeout(err) {
glog.Errorf("Write failed: %v", err)
}
}
} else {
length, err = recoverDNSQuestion(state.msg.Question[0].Name, msg, buffer, length)
if err != nil {
glog.Errorf("Recover DNS question failed: %v", err)
}
dnsClients.mu.Lock()
delete(dnsClients.clients, dnsClientQuery{host, dnsQType})
dnsClients.mu.Unlock()
}
}
return drop, length
}
func processDNSQueryPacket(
dnsClients *dnsClientCache,
cliAddr net.Addr,
buffer []byte,
length int,
dnsSearch []string) (int, error) {
msg := &dns.Msg{}
if err := msg.Unpack(buffer[:length]); err != nil {
glog.Warningf("Unable to unpack DNS packet. Error is: %v", err)
return length, err
}
// Query - Response bit that specifies whether this message is a query (0) or a response (1).
if msg.MsgHdr.Response {
return length, fmt.Errorf("DNS packet should be a query message")
}
// QDCOUNT
if len(msg.Question) != 1 {
glog.V(1).Infof("Number of entries in the question section of the DNS packet is: %d", len(msg.Question))
glog.V(1).Infof("DNS suffix appending does not support more than one question.")
return length, nil
}
// ANCOUNT, NSCOUNT, ARCOUNT
if len(msg.Answer) != 0 || len(msg.Ns) != 0 || len(msg.Extra) != 0 {
glog.V(1).Infof("DNS packet contains more than question section.")
return length, nil
}
dnsQType := msg.Question[0].Qtype
dnsQClass := msg.Question[0].Qclass
if packetRequiresDNSSuffix(dnsQType, dnsQClass) {
host, _, err := net.SplitHostPort(cliAddr.String())
if err != nil {
glog.V(1).Infof("Failed to get host from client address: %v", err)
host = cliAddr.String()
}
length = processUnpackedDNSQueryPacket(dnsClients, msg, host, dnsQType, buffer, length, dnsSearch)
}
return length, nil
}
func processDNSResponsePacket(
svrConn net.Conn,
dnsClients *dnsClientCache,
cliAddr net.Addr,
buffer []byte,
length int,
dnsSearch []string) (bool, int, error) {
var drop bool
msg := &dns.Msg{}
if err := msg.Unpack(buffer[:length]); err != nil {
glog.Warningf("Unable to unpack DNS packet. Error is: %v", err)
return drop, length, err
}
// Query - Response bit that specifies whether this message is a query (0) or a response (1).
if !msg.MsgHdr.Response {
return drop, length, fmt.Errorf("DNS packet should be a response message")
}
// QDCOUNT
if len(msg.Question) != 1 {
glog.V(1).Infof("Number of entries in the response section of the DNS packet is: %d", len(msg.Answer))
return drop, length, nil
}
dnsQType := msg.Question[0].Qtype
dnsQClass := msg.Question[0].Qclass
if packetRequiresDNSSuffix(dnsQType, dnsQClass) {
host, _, err := net.SplitHostPort(cliAddr.String())
if err != nil {
glog.V(1).Infof("Failed to get host from client address: %v", err)
host = cliAddr.String()
}
drop, length = processUnpackedDNSResponsePacket(svrConn, dnsClients, msg, msg.MsgHdr.Rcode, host, dnsQType, buffer, length, dnsSearch)
}
return drop, length, nil
}
func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serviceInfo, proxier *Proxier) {
var buffer [4096]byte // 4KiB should be enough for most whole packets
var dnsSearch []string
if isDNSService(service.Port) {
dnsSearch = []string{"", namespaceServiceDomain, serviceDomain, clusterDomain}
execer := exec.New()
ipconfigInterface := ipconfig.New(execer)
suffixList, err := ipconfigInterface.GetDnsSuffixSearchList()
if err == nil {
dnsSearch = append(dnsSearch, suffixList...)
}
}
for {
if !myInfo.isAlive() {
// The service port was closed or replaced.
break
}
// Block until data arrives.
// TODO: Accumulate a histogram of n or something, to fine tune the buffer size.
n, cliAddr, err := udp.ReadFrom(buffer[0:])
if err != nil {
if e, ok := err.(net.Error); ok {
if e.Temporary() {
glog.V(1).Infof("ReadFrom had a temporary failure: %v", err)
continue
}
}
glog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err)
break
}
// If this is a DNS query packet, append the DNS suffix before forwarding.
if isDNSService(service.Port) {
n, err = processDNSQueryPacket(myInfo.dnsClients, cliAddr, buffer[:], n, dnsSearch)
if err != nil {
glog.Errorf("Process DNS query packet failed: %v", err)
}
}
// If this is a client we know already, reuse the connection and goroutine.
svrConn, err := udp.getBackendConn(myInfo.activeClients, myInfo.dnsClients, cliAddr, proxier, service, myInfo.timeout, dnsSearch)
if err != nil {
continue
}
// TODO: It would be nice to let the goroutine handle this write, but we don't
// really want to copy the buffer. We could do a pool of buffers or something.
_, err = svrConn.Write(buffer[0:n])
if err != nil {
if !logTimeout(err) {
glog.Errorf("Write failed: %v", err)
// TODO: Maybe tear down the goroutine for this client/server pair?
}
continue
}
err = svrConn.SetDeadline(time.Now().Add(myInfo.timeout))
if err != nil {
glog.Errorf("SetDeadline failed: %v", err)
continue
}
}
}
func (udp *udpProxySocket) getBackendConn(activeClients *clientCache, dnsClients *dnsClientCache, cliAddr net.Addr, proxier *Proxier, service ServicePortPortalName, timeout time.Duration, dnsSearch []string) (net.Conn, error) {
activeClients.mu.Lock()
defer activeClients.mu.Unlock()
svrConn, found := activeClients.clients[cliAddr.String()]
if !found {
// TODO: This could spin up a new goroutine to make the outbound connection,
// and keep accepting inbound traffic.
glog.V(3).Infof("New UDP connection from %s", cliAddr)
var err error
svrConn, err = tryConnect(service, cliAddr, "udp", proxier)
if err != nil {
return nil, err
}
if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil {
glog.Errorf("SetDeadline failed: %v", err)
return nil, err
}
activeClients.clients[cliAddr.String()] = svrConn
go func(cliAddr net.Addr, svrConn net.Conn, activeClients *clientCache, dnsClients *dnsClientCache, service ServicePortPortalName, timeout time.Duration, dnsSearch []string) {
defer runtime.HandleCrash()
udp.proxyClient(cliAddr, svrConn, activeClients, dnsClients, service, timeout, dnsSearch)
}(cliAddr, svrConn, activeClients, dnsClients, service, timeout, dnsSearch)
}
return svrConn, nil
}
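// getBackendConn is a get-or-create cache guarded by a single mutex: the first
// datagram from a client address dials the backend and spawns a reader
// goroutine; subsequent datagrams reuse the cached conn until it times out.
// The pattern in isolation (hypothetical, generic over any dial function):
func getOrCreateConn(cache *clientCache, key string, dial func() (net.Conn, error)) (net.Conn, error) {
cache.mu.Lock()
defer cache.mu.Unlock()
if conn, found := cache.clients[key]; found {
return conn, nil
}
conn, err := dial()
if err != nil {
return nil, err
}
cache.clients[key] = conn
return conn, nil
}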
// This function is expected to be called as a goroutine.
// TODO: Track and log bytes copied, like TCP
func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activeClients *clientCache, dnsClients *dnsClientCache, service ServicePortPortalName, timeout time.Duration, dnsSearch []string) {
defer svrConn.Close()
var buffer [4096]byte
for {
n, err := svrConn.Read(buffer[0:])
if err != nil {
if !logTimeout(err) {
glog.Errorf("Read failed: %v", err)
}
break
}
drop := false
if isDNSService(service.Port) {
drop, n, err = processDNSResponsePacket(svrConn, dnsClients, cliAddr, buffer[:], n, dnsSearch)
if err != nil {
glog.Errorf("Process DNS response packet failed: %v", err)
}
}
if !drop {
err = svrConn.SetDeadline(time.Now().Add(timeout))
if err != nil {
glog.Errorf("SetDeadline failed: %v", err)
break
}
n, err = udp.WriteTo(buffer[0:n], cliAddr)
if err != nil {
if !logTimeout(err) {
glog.Errorf("WriteTo failed: %v", err)
}
break
}
}
}
activeClients.mu.Lock()
delete(activeClients.clients, cliAddr.String())
activeClients.mu.Unlock()
}

View File

@ -1,374 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package winuserspace
import (
"errors"
"fmt"
"net"
"reflect"
"strconv"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/util/slice"
)
var (
ErrMissingServiceEntry = errors.New("missing service entry")
ErrMissingEndpoints = errors.New("missing endpoints")
)
type affinityState struct {
clientIP string
//clientProtocol api.Protocol //not yet used
//sessionCookie string //not yet used
endpoint string
lastUsed time.Time
}
type affinityPolicy struct {
affinityType api.ServiceAffinity
affinityMap map[string]*affinityState // map client IP -> affinity info
ttlSeconds int
}
// LoadBalancerRR is a round-robin load balancer.
type LoadBalancerRR struct {
lock sync.RWMutex
services map[proxy.ServicePortName]*balancerState
}
// Ensure this implements LoadBalancer.
var _ LoadBalancer = &LoadBalancerRR{}
type balancerState struct {
endpoints []string // a list of "ip:port" style strings
index int // current index into endpoints
affinity affinityPolicy
}
func newAffinityPolicy(affinityType api.ServiceAffinity, ttlSeconds int) *affinityPolicy {
return &affinityPolicy{
affinityType: affinityType,
affinityMap: make(map[string]*affinityState),
ttlSeconds: ttlSeconds,
}
}
// NewLoadBalancerRR returns a new LoadBalancerRR.
func NewLoadBalancerRR() *LoadBalancerRR {
return &LoadBalancerRR{
services: map[proxy.ServicePortName]*balancerState{},
}
}
func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlSeconds int) error {
glog.V(4).Infof("LoadBalancerRR NewService %q", svcPort)
lb.lock.Lock()
defer lb.lock.Unlock()
lb.newServiceInternal(svcPort, affinityType, ttlSeconds)
return nil
}
// This assumes that lb.lock is already held.
func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlSeconds int) *balancerState {
if ttlSeconds == 0 {
ttlSeconds = int(api.DefaultClientIPServiceAffinitySeconds) // Default to 3 hours if not specified. Should 0 mean unlimited instead?
}
if _, exists := lb.services[svcPort]; !exists {
lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlSeconds)}
glog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort)
} else if affinityType != "" {
lb.services[svcPort].affinity.affinityType = affinityType
}
return lb.services[svcPort]
}
func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
glog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort)
lb.lock.Lock()
defer lb.lock.Unlock()
delete(lb.services, svcPort)
}
// isSessionAffinity returns true if this service is using some form of session affinity.
func isSessionAffinity(affinity *affinityPolicy) bool {
// Should never be empty string, but checking for it to be safe.
if affinity.affinityType == "" || affinity.affinityType == api.ServiceAffinityNone {
return false
}
return true
}
// NextEndpoint returns a service endpoint.
// The service endpoint is chosen using the round-robin algorithm.
func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error) {
// Coarse locking is simple. We can get more fine-grained if/when we
// can prove it matters.
lb.lock.Lock()
defer lb.lock.Unlock()
state, exists := lb.services[svcPort]
if !exists || state == nil {
return "", ErrMissingServiceEntry
}
if len(state.endpoints) == 0 {
return "", ErrMissingEndpoints
}
glog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints)
sessionAffinityEnabled := isSessionAffinity(&state.affinity)
var ipaddr string
if sessionAffinityEnabled {
// Caution: don't shadow ipaddr
var err error
ipaddr, _, err = net.SplitHostPort(srcAddr.String())
if err != nil {
return "", fmt.Errorf("malformed source address %q: %v", srcAddr.String(), err)
}
if !sessionAffinityReset {
sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
if exists && int(time.Since(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
// Affinity wins.
endpoint := sessionAffinity.endpoint
sessionAffinity.lastUsed = time.Now()
glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint)
return endpoint, nil
}
}
}
// Take the next endpoint.
endpoint := state.endpoints[state.index]
state.index = (state.index + 1) % len(state.endpoints)
if sessionAffinityEnabled {
affinity := state.affinity.affinityMap[ipaddr]
if affinity == nil {
affinity = new(affinityState) //&affinityState{ipaddr, "TCP", "", endpoint, time.Now()}
state.affinity.affinityMap[ipaddr] = affinity
}
affinity.lastUsed = time.Now()
affinity.endpoint = endpoint
affinity.clientIP = ipaddr
glog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr])
}
return endpoint, nil
}
type hostPortPair struct {
host string
port int
}
func isValidEndpoint(hpp *hostPortPair) bool {
return hpp.host != "" && hpp.port > 0
}
func flattenValidEndpoints(endpoints []hostPortPair) []string {
// Convert Endpoint objects into strings for easier use later. Ignore
// the protocol field - we'll get that from the Service objects.
var result []string
for i := range endpoints {
hpp := &endpoints[i]
if isValidEndpoint(hpp) {
result = append(result, net.JoinHostPort(hpp.host, strconv.Itoa(hpp.port)))
}
}
return result
}
// Remove any session affinity records associated to a particular endpoint (for example when a pod goes down).
func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) {
for _, affinity := range state.affinity.affinityMap {
if affinity.endpoint == endpoint {
glog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort)
delete(state.affinity.affinityMap, affinity.clientIP)
}
}
}
// updateAffinityMap compares the new endpoint list against the endpoints the
// load balancer currently tracks, and removes session affinity records for any
// endpoint that is not present in both lists. The caller must hold lb.lock.
func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEndpoints []string) {
allEndpoints := map[string]int{}
for _, newEndpoint := range newEndpoints {
allEndpoints[newEndpoint] = 1
}
state, exists := lb.services[svcPort]
if !exists {
return
}
for _, existingEndpoint := range state.endpoints {
allEndpoints[existingEndpoint] = allEndpoints[existingEndpoint] + 1
}
for mKey, mVal := range allEndpoints {
if mVal == 1 {
glog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort)
removeSessionAffinityByEndpoint(state, svcPort, mKey)
}
}
}
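// A sketch of the counting trick above (hypothetical values): endpoints
// present in both the new list and state.endpoints get a count of 2 and keep
// their affinity records; a count of 1 means the endpoint is in only one list,
// so its records are purged (a no-op for a brand-new endpoint, which has no
// records yet):
//
//	newEndpoints:    ["10.0.0.1:80", "10.0.0.2:80"]
//	state.endpoints: ["10.0.0.2:80", "10.0.0.3:80"]
//	counts:          {"10.0.0.1:80": 1, "10.0.0.2:80": 2, "10.0.0.3:80": 1}
//	// affinity records pointing at the departed 10.0.0.3:80 are removed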
// buildPortsToEndpointsMap builds a map of portname -> all ip:ports for that
// portname. Explode Endpoints.Subsets[*] into this structure.
func buildPortsToEndpointsMap(endpoints *api.Endpoints) map[string][]hostPortPair {
portsToEndpoints := map[string][]hostPortPair{}
for i := range endpoints.Subsets {
ss := &endpoints.Subsets[i]
for i := range ss.Ports {
port := &ss.Ports[i]
for i := range ss.Addresses {
addr := &ss.Addresses[i]
portsToEndpoints[port.Name] = append(portsToEndpoints[port.Name], hostPortPair{addr.IP, int(port.Port)})
// Ignore the protocol field - we'll get that from the Service objects.
}
}
}
return portsToEndpoints
}
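// For example (hypothetical object): a subset with addresses 10.0.0.1 and
// 10.0.0.2 and named ports p=80, q=443 explodes into:
//
//	{"p": [{10.0.0.1 80}, {10.0.0.2 80}], "q": [{10.0.0.1 443}, {10.0.0.2 443}]}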
func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *api.Endpoints) {
portsToEndpoints := buildPortsToEndpointsMap(endpoints)
lb.lock.Lock()
defer lb.lock.Unlock()
for portname := range portsToEndpoints {
svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
newEndpoints := flattenValidEndpoints(portsToEndpoints[portname])
state, exists := lb.services[svcPort]
if !exists || state == nil || len(newEndpoints) > 0 {
glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
lb.updateAffinityMap(svcPort, newEndpoints)
// OnEndpointsAdd can be called without NewService being called externally.
// To be safe we will call it here. A new service will only be created
// if one does not already exist. The affinity will be updated
// later, once NewService is called.
state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
state.endpoints = slice.ShuffleStrings(newEndpoints)
// Reset the round-robin index.
state.index = 0
}
}
}
func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
portsToEndpoints := buildPortsToEndpointsMap(endpoints)
oldPortsToEndpoints := buildPortsToEndpointsMap(oldEndpoints)
registeredEndpoints := make(map[proxy.ServicePortName]bool)
lb.lock.Lock()
defer lb.lock.Unlock()
for portname := range portsToEndpoints {
svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
newEndpoints := flattenValidEndpoints(portsToEndpoints[portname])
state, exists := lb.services[svcPort]
curEndpoints := []string{}
if state != nil {
curEndpoints = state.endpoints
}
if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints) {
glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
lb.updateAffinityMap(svcPort, newEndpoints)
// OnEndpointsUpdate can be called without NewService being called externally.
// To be safe we will call it here. A new service will only be created
// if one does not already exist. The affinity will be updated
// later, once NewService is called.
state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
state.endpoints = slice.ShuffleStrings(newEndpoints)
// Reset the round-robin index.
state.index = 0
}
registeredEndpoints[svcPort] = true
}
for portname := range oldPortsToEndpoints {
svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
if _, exists := registeredEndpoints[svcPort]; !exists {
glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
// If the service is still around, reset but don't delete.
if state, ok := lb.services[svcPort]; ok {
state.endpoints = []string{}
state.index = 0
state.affinity.affinityMap = map[string]*affinityState{}
}
}
}
}
func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *api.Endpoints) {
portsToEndpoints := buildPortsToEndpointsMap(endpoints)
lb.lock.Lock()
defer lb.lock.Unlock()
for portname := range portsToEndpoints {
svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
// If the service is still around, reset but don't delete.
if state, ok := lb.services[svcPort]; ok {
state.endpoints = []string{}
state.index = 0
state.affinity.affinityMap = map[string]*affinityState{}
}
}
}
func (lb *LoadBalancerRR) OnEndpointsSynced() {
}
// slicesEquiv tests whether two slices contain the same elements. It sorts
// both slices in-place.
func slicesEquiv(lhs, rhs []string) bool {
if len(lhs) != len(rhs) {
return false
}
return reflect.DeepEqual(slice.SortStrings(lhs), slice.SortStrings(rhs))
}
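// Because both inputs are sorted in place, callers that need to preserve the
// original order must pass a copy, as OnEndpointsUpdate does above with
// slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints).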
func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortName) {
lb.lock.Lock()
defer lb.lock.Unlock()
state, exists := lb.services[svcPort]
if !exists {
return
}
for ip, affinity := range state.affinity.affinityMap {
if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
delete(state.affinity.affinityMap, ip)
}
}
}
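// A hedged wiring sketch (the ticker loop is illustrative, not from this
// file): callers are expected to invoke CleanupStaleStickySessions
// periodically per service so expired affinity entries do not accumulate:
//
//	go func() {
//		for range time.Tick(time.Minute) {
//			lb.CleanupStaleStickySessions(svcPort)
//		}
//	}()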


@ -1,717 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package winuserspace
import (
"net"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
)
func TestValidateWorks(t *testing.T) {
if isValidEndpoint(&hostPortPair{}) {
t.Errorf("Didn't fail for empty set")
}
if isValidEndpoint(&hostPortPair{host: "foobar"}) {
t.Errorf("Didn't fail with invalid port")
}
if isValidEndpoint(&hostPortPair{host: "foobar", port: -1}) {
t.Errorf("Didn't fail with a negative port")
}
if !isValidEndpoint(&hostPortPair{host: "foobar", port: 8080}) {
t.Errorf("Failed a valid config.")
}
}
func TestFilterWorks(t *testing.T) {
endpoints := []hostPortPair{
{host: "foobar", port: 1},
{host: "foobar", port: 2},
{host: "foobar", port: -1},
{host: "foobar", port: 3},
{host: "foobar", port: -2},
}
filtered := flattenValidEndpoints(endpoints)
if len(filtered) != 3 {
t.Errorf("Failed to filter to the correct size")
}
if filtered[0] != "foobar:1" {
t.Errorf("Index zero is not foobar:1")
}
if filtered[1] != "foobar:2" {
t.Errorf("Index one is not foobar:2")
}
if filtered[2] != "foobar:3" {
t.Errorf("Index two is not foobar:3")
}
}
func TestLoadBalanceFailsWithNoEndpoints(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "does-not-exist"}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil {
t.Errorf("Didn't fail with non-existent service")
}
if len(endpoint) != 0 {
t.Errorf("Got an endpoint")
}
}
func expectEndpoint(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
endpoint, err := loadBalancer.NextEndpoint(service, netaddr, false)
if err != nil {
t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
}
if endpoint != expected {
t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
}
}
func expectEndpointWithSessionAffinityReset(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
endpoint, err := loadBalancer.NextEndpoint(service, netaddr, true)
if err != nil {
t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
}
if endpoint != expected {
t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
}
}
func TestLoadBalanceWorksWithSingleEndpoint(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
Ports: []api.EndpointPort{{Name: "p", Port: 40}},
}},
}
loadBalancer.OnEndpointsAdd(endpoints)
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
}
func stringsInSlice(haystack []string, needles ...string) bool {
for _, needle := range needles {
found := false
for i := range haystack {
if haystack[i] == needle {
found = true
break
}
}
if !found {
return false
}
}
return true
}
func TestLoadBalanceWorksWithMultipleEndpoints(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}},
}},
}
loadBalancer.OnEndpointsAdd(endpoints)
shuffledEndpoints := loadBalancer.services[service].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint:1", "endpoint:2", "endpoint:3") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], nil)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil)
}
func TestLoadBalanceWorksWithMultipleEndpointsMultiplePorts(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}},
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
Ports: []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}},
},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
shuffledEndpoints := loadBalancer.services[serviceP].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:1", "endpoint3:3") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint1:2", "endpoint2:2", "endpoint3:4") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
}
func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpointsv1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint2"}},
Ports: []api.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
Ports: []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}},
},
},
}
loadBalancer.OnEndpointsAdd(endpointsv1)
shuffledEndpoints := loadBalancer.services[serviceP].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:2", "endpoint3:3") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint1:10", "endpoint2:20", "endpoint3:30") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
// Then update the configuration with one fewer endpoint, and make sure
// we start at the beginning again
endpointsv2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint4"}},
Ports: []api.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint5"}},
Ports: []api.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}},
},
},
}
loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)
shuffledEndpoints = loadBalancer.services[serviceP].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint4:4", "endpoint5:5") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
if !stringsInSlice(shuffledEndpoints, "endpoint4:40", "endpoint5:50") {
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
}
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
// Clear endpoints
endpointsv3 := &api.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil}
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
endpoint, err = loadBalancer.NextEndpoint(serviceP, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
}
func TestLoadBalanceWorksWithServiceRemoval(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
fooServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
barServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: "p"}
endpoint, err := loadBalancer.NextEndpoint(fooServiceP, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: fooServiceP.Name, Namespace: fooServiceP.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}},
Ports: []api.EndpointPort{{Name: "p", Port: 123}},
},
},
}
endpoints2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: barServiceP.Name, Namespace: barServiceP.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}},
Ports: []api.EndpointPort{{Name: "p", Port: 456}},
},
},
}
loadBalancer.OnEndpointsAdd(endpoints1)
loadBalancer.OnEndpointsAdd(endpoints2)
shuffledFooEndpoints := loadBalancer.services[fooServiceP].endpoints
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil)
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil)
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[2], nil)
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil)
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil)
shuffledBarEndpoints := loadBalancer.services[barServiceP].endpoints
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
// Then update the configuration by removing foo
loadBalancer.OnEndpointsDelete(endpoints1)
endpoint, err = loadBalancer.NextEndpoint(fooServiceP, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
// but bar is still there, and we continue RR from where we left off.
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
}
func TestStickyLoadBalanceWorksWithNewServiceCalledFirst(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
// Call NewService() before OnEndpointsAdd()
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
ep1, err := loadBalancer.NextEndpoint(service, client1, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep1, client1)
ep2, err := loadBalancer.NextEndpoint(service, client2, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep2, client2)
ep3, err := loadBalancer.NextEndpoint(service, client3, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep3, client3)
}
func TestStickyLoadBalanceWorksWithNewServiceCalledSecond(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
// Call OnEndpointsAdd() before NewService()
endpoints := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
ep1, err := loadBalancer.NextEndpoint(service, client1, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep1, client1)
ep2, err := loadBalancer.NextEndpoint(service, client2, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep2, client2)
ep3, err := loadBalancer.NextEndpoint(service, client3, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep3, client3)
expectEndpoint(t, loadBalancer, service, ep1, client1)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpoint(t, loadBalancer, service, ep3, client3)
}
func TestStickyLoadBalanceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
client4 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 4), Port: 0}
client5 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 5), Port: 0}
client6 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 6), Port: 0}
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpointsv1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
},
},
}
loadBalancer.OnEndpointsAdd(endpointsv1)
shuffledEndpoints := loadBalancer.services[service].endpoints
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
client1Endpoint := shuffledEndpoints[0]
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
client2Endpoint := shuffledEndpoints[1]
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
client3Endpoint := shuffledEndpoints[2]
endpointsv2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}},
},
},
}
loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)
shuffledEndpoints = loadBalancer.services[service].endpoints
if client1Endpoint == "endpoint:3" {
client1Endpoint = shuffledEndpoints[0]
} else if client2Endpoint == "endpoint:3" {
client2Endpoint = shuffledEndpoints[0]
} else if client3Endpoint == "endpoint:3" {
client3Endpoint = shuffledEndpoints[0]
}
expectEndpoint(t, loadBalancer, service, client1Endpoint, client1)
expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
endpointsv3 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}},
},
},
}
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
shuffledEndpoints = loadBalancer.services[service].endpoints
expectEndpoint(t, loadBalancer, service, client1Endpoint, client1)
expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client4)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client5)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client6)
}
func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpointsv1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
},
},
}
loadBalancer.OnEndpointsAdd(endpointsv1)
shuffledEndpoints := loadBalancer.services[service].endpoints
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
// Then update the configuration with one fewer endpoint, and make sure
// we start at the beginning again
endpointsv2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 4}, {Port: 5}},
},
},
}
loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)
shuffledEndpoints = loadBalancer.services[service].endpoints
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
// Clear endpoints
endpointsv3 := &api.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil}
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
endpoint, err = loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
}
func TestStickyLoadBalanceWorksWithServiceRemoval(t *testing.T) {
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
loadBalancer := NewLoadBalancerRR()
fooService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
endpoint, err := loadBalancer.NextEndpoint(fooService, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
loadBalancer.NewService(fooService, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints1 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: fooService.Name, Namespace: fooService.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
},
},
}
barService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: ""}
loadBalancer.NewService(barService, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints2 := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: barService.Name, Namespace: barService.Namespace},
Subsets: []api.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 4}, {Port: 5}},
},
},
}
loadBalancer.OnEndpointsAdd(endpoints1)
loadBalancer.OnEndpointsAdd(endpoints2)
shuffledFooEndpoints := loadBalancer.services[fooService].endpoints
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
shuffledBarEndpoints := loadBalancer.services[barService].endpoints
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
// Then update the configuration by removing foo
loadBalancer.OnEndpointsDelete(endpoints1)
endpoint, err = loadBalancer.NextEndpoint(fooService, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
// but bar is still there, and we continue RR from where we left off.
shuffledBarEndpoints = loadBalancer.services[barService].endpoints
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
}
func TestStickyLoadBalanceWorksWithEndpointFails(t *testing.T) {
loadBalancer := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
// Call NewService() before OnEndpointsAdd()
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints := &api.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
ep1, err := loadBalancer.NextEndpoint(service, client1, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
ep2, err := loadBalancer.NextEndpoint(service, client2, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
ep3, err := loadBalancer.NextEndpoint(service, client3, false)
if err != nil {
t.Errorf("Didn't find a service for %s: %v", service, err)
}
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client1)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client1)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)
expectEndpoint(t, loadBalancer, service, ep2, client2)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
}


@ -1,35 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package winuserspace
import (
"fmt"
"k8s.io/apimachinery/pkg/types"
)
// ServicePortPortalName carries a namespace + name + port name + portal IP. This is the unique
// identifier for a Windows service port portal.
type ServicePortPortalName struct {
types.NamespacedName
Port string
PortalIPName string
}
func (spn ServicePortPortalName) String() string {
return fmt.Sprintf("%s:%s:%s", spn.NamespacedName.String(), spn.Port, spn.PortalIPName)
}
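// For illustration (hypothetical values): a portal for named port "http" of
// service "default/frontend" bound to 10.0.0.5 renders as:
//
//	spn := ServicePortPortalName{
//		NamespacedName: types.NamespacedName{Namespace: "default", Name: "frontend"},
//		Port:           "http",
//		PortalIPName:   "10.0.0.5",
//	}
//	fmt.Println(spn.String()) // -> "default/frontend:http:10.0.0.5"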