mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)

commit: vendor files

vendor/k8s.io/kubernetes/pkg/proxy/BUILD (generated, vendored, normal file; 41 lines)
@@ -0,0 +1,41 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "types.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/proxy",
    deps = ["//vendor/k8s.io/apimachinery/pkg/types:go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/proxy/apis/kubeproxyconfig:all-srcs",
        "//pkg/proxy/config:all-srcs",
        "//pkg/proxy/healthcheck:all-srcs",
        "//pkg/proxy/iptables:all-srcs",
        "//pkg/proxy/ipvs:all-srcs",
        "//pkg/proxy/metrics:all-srcs",
        "//pkg/proxy/userspace:all-srcs",
        "//pkg/proxy/util:all-srcs",
        "//pkg/proxy/winkernel:all-srcs",
        "//pkg/proxy/winuserspace:all-srcs",
    ],
    tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/proxy/OWNERS (generated, vendored, normal file; 13 lines)
@@ -0,0 +1,13 @@
approvers:
- thockin
- matchstick
reviewers:
- thockin
- lavalamp
- smarterclayton
- brendandburns
- vishh
- justinsb
- freehan
- dcbw
- m1093782566

vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/BUILD (generated, vendored, normal file; 40 lines)
@@ -0,0 +1,40 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "register.go",
        "types.go",
        "zz_generated.deepcopy.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig",
    deps = [
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/proxy/apis/kubeproxyconfig/scheme:all-srcs",
        "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:all-srcs",
        "//pkg/proxy/apis/kubeproxyconfig/validation:all-srcs",
    ],
    tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/OWNERS (generated, vendored, normal file; 4 lines)
@@ -0,0 +1,4 @@
approvers:
- thockin
reviewers:
- sig-network-reviewers

vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/doc.go (generated, vendored, normal file; 19 lines)
@@ -0,0 +1,19 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:deepcopy-gen=package

package kubeproxyconfig // import "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"

vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/register.go (generated, vendored, normal file; 51 lines)
@@ -0,0 +1,51 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubeproxyconfig

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

var (
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	AddToScheme   = SchemeBuilder.AddToScheme
)

// GroupName is the group name used in this package
const GroupName = "kubeproxy.config.k8s.io"

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}

// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
	return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}

func addKnownTypes(scheme *runtime.Scheme) error {
	// TODO this will get cleaned up when the scheme types are fixed
	scheme.AddKnownTypes(SchemeGroupVersion,
		&KubeProxyConfiguration{},
	)
	return nil
}
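
[editor's note, not part of the diff] A minimal sketch of how the registration helpers above are typically consumed, assuming the vendored packages resolve on the import path; every identifier used comes from the file above or from apimachinery.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme runs addKnownTypes, registering KubeProxyConfiguration
	// under the internal version of kubeproxy.config.k8s.io.
	if err := kubeproxyconfig.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// Kind qualifies an unqualified kind with the group name.
	fmt.Println(kubeproxyconfig.Kind("KubeProxyConfiguration"))
	// Output: KubeProxyConfiguration.kubeproxy.config.k8s.io
}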

vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme/BUILD (generated, vendored, normal file; 28 lines)
@@ -0,0 +1,28 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["scheme.go"],
    importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/proxy/apis/kubeproxyconfig:go_default_library",
        "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme/scheme.go (generated, vendored, normal file; 42 lines)
@@ -0,0 +1,42 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheme

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
)

var (
	// Scheme defines methods for serializing and deserializing API objects.
	Scheme = runtime.NewScheme()
	// Codecs provides methods for retrieving codecs and serializers for specific
	// versions and content types.
	Codecs = serializer.NewCodecFactory(Scheme)
)

func init() {
	AddToScheme(Scheme)
}

// AddToScheme adds the types of this group into the given scheme.
func AddToScheme(scheme *runtime.Scheme) {
	v1alpha1.AddToScheme(scheme)
	kubeproxyconfig.AddToScheme(scheme)
}
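
[editor's note, not part of the diff] The Scheme/Codecs pair above is what lets a caller decode serialized config bytes into the internal type; a rough sketch under that assumption (the decode path and behavior of UniversalDecoder with no arguments, decoding to the internal version, are as I understand this apimachinery release):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme"
)

func main() {
	data := []byte(`{"kind":"KubeProxyConfiguration","apiVersion":"kubeproxy.config.k8s.io/v1alpha1"}`)
	// With no target versions given, UniversalDecoder should decode the
	// versioned bytes and convert them to the internal type registered
	// by scheme.AddToScheme above. Decoding does not apply defaults.
	obj, err := runtime.Decode(scheme.Codecs.UniversalDecoder(), data)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", obj) // expected: *kubeproxyconfig.KubeProxyConfiguration
}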

vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/types.go (generated, vendored, normal file; 257 lines)
@@ -0,0 +1,257 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubeproxyconfig

import (
	"fmt"
	"sort"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ClientConnectionConfiguration contains details for constructing a client.
type ClientConnectionConfiguration struct {
	// kubeConfigFile is the path to a kubeconfig file.
	KubeConfigFile string
	// acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the
	// default value of 'application/json'. This field will control all connections to the server used by a particular
	// client.
	AcceptContentTypes string
	// contentType is the content type used when sending data to the server from this client.
	ContentType string
	// qps controls the number of queries per second allowed for this connection.
	QPS float32
	// burst allows extra queries to accumulate when a client is exceeding its rate.
	Burst int32
}

// KubeProxyIPTablesConfiguration contains iptables-related configuration
// details for the Kubernetes proxy server.
type KubeProxyIPTablesConfiguration struct {
	// masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using
	// the pure iptables proxy mode. Values must be within the range [0, 31].
	MasqueradeBit *int32
	// masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.
	MasqueradeAll bool
	// syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m',
	// '2h22m'). Must be greater than 0.
	SyncPeriod metav1.Duration
	// minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m',
	// '2h22m').
	MinSyncPeriod metav1.Duration
}

// KubeProxyIPVSConfiguration contains ipvs-related configuration
// details for the Kubernetes proxy server.
type KubeProxyIPVSConfiguration struct {
	// syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m',
	// '2h22m'). Must be greater than 0.
	SyncPeriod metav1.Duration
	// minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m',
	// '2h22m').
	MinSyncPeriod metav1.Duration
	// ipvs scheduler
	Scheduler string
}

// KubeProxyConntrackConfiguration contains conntrack settings for
// the Kubernetes proxy server.
type KubeProxyConntrackConfiguration struct {
	// max is the maximum number of NAT connections to track (0 to
	// leave as-is). This takes precedence over maxPerCore and min.
	Max *int32
	// maxPerCore is the maximum number of NAT connections to track
	// per CPU core (0 to leave the limit as-is and ignore min).
	MaxPerCore *int32
	// min is the minimum value of connect-tracking records to allocate,
	// regardless of maxPerCore (set maxPerCore=0 to leave the limit as-is).
	Min *int32
	// tcpEstablishedTimeout is how long an idle TCP connection will be kept open
	// (e.g. '2s'). Must be greater than 0 to set.
	TCPEstablishedTimeout *metav1.Duration
	// tcpCloseWaitTimeout is how long an idle conntrack entry
	// in CLOSE_WAIT state will remain in the conntrack
	// table. (e.g. '60s'). Must be greater than 0 to set.
	TCPCloseWaitTimeout *metav1.Duration
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// KubeProxyConfiguration contains everything necessary to configure the
// Kubernetes proxy server.
type KubeProxyConfiguration struct {
	metav1.TypeMeta

	// featureGates is a comma-separated list of key=value pairs that control
	// which alpha/beta features are enabled.
	//
	// TODO this really should be a map but that requires refactoring all
	// components to use config files because local-up-cluster.sh only supports
	// the --feature-gates flag right now, which is comma-separated key=value
	// pairs.
	FeatureGates string

	// bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0
	// for all interfaces)
	BindAddress string
	// healthzBindAddress is the IP address and port for the health check server to serve on,
	// defaulting to 0.0.0.0:10256
	HealthzBindAddress string
	// metricsBindAddress is the IP address and port for the metrics server to serve on,
	// defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces)
	MetricsBindAddress string
	// enableProfiling enables profiling via web interface on /debug/pprof handler.
	// Profiling handlers will be handled by metrics server.
	EnableProfiling bool
	// clusterCIDR is the CIDR range of the pods in the cluster. It is used to
	// bridge traffic coming from outside of the cluster. If not provided,
	// no off-cluster bridging will be performed.
	ClusterCIDR string
	// hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.
	HostnameOverride string
	// clientConnection specifies the kubeconfig file and client connection settings for the proxy
	// server to use when communicating with the apiserver.
	ClientConnection ClientConnectionConfiguration
	// iptables contains iptables-related configuration options.
	IPTables KubeProxyIPTablesConfiguration
	// ipvs contains ipvs-related configuration options.
	IPVS KubeProxyIPVSConfiguration
	// oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within
	// the range [-1000, 1000]
	OOMScoreAdj *int32
	// mode specifies which proxy mode to use.
	Mode ProxyMode
	// portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
	// in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
	PortRange string
	// resourceContainer is the absolute name of the resource-only container to create and run
	// the Kube-proxy in (Default: /kube-proxy).
	ResourceContainer string
	// udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
	// Must be greater than 0. Only applicable for proxyMode=userspace.
	UDPIdleTimeout metav1.Duration
	// conntrack contains conntrack-related configuration options.
	Conntrack KubeProxyConntrackConfiguration
	// configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater
	// than 0.
	ConfigSyncPeriod metav1.Duration
}

// Currently, four modes of proxying are available in total: 'userspace' (older, stable), 'iptables'
// (newer, faster), 'ipvs', and 'kernelspace' (Windows only, newer).
//
// If blank, use the best-available proxy (currently iptables, but may change in
// future versions). If the iptables proxy is selected, regardless of how, but
// the system's kernel or iptables versions are insufficient, this always falls
// back to the userspace proxy.
type ProxyMode string

const (
	ProxyModeUserspace   ProxyMode = "userspace"
	ProxyModeIPTables    ProxyMode = "iptables"
	ProxyModeIPVS        ProxyMode = "ipvs"
	ProxyModeKernelspace ProxyMode = "kernelspace"
)

// IPVSSchedulerMethod is the algorithm for allocating TCP connections and
// UDP datagrams to real servers. Scheduling algorithms are implemented as
// kernel modules. Ten are shipped with the Linux Virtual Server.
type IPVSSchedulerMethod string

const (
	// RoundRobin distributes jobs equally amongst the available real servers.
	RoundRobin IPVSSchedulerMethod = "rr"
	// WeightedRoundRobin assigns jobs to real servers proportionally to their real servers' weight.
	// Servers with higher weights receive new jobs first and get more jobs than servers with lower weights.
	// Servers with equal weights get an equal distribution of new jobs.
	WeightedRoundRobin IPVSSchedulerMethod = "wrr"
	// LeastConnection assigns more jobs to real servers with fewer active jobs.
	LeastConnection IPVSSchedulerMethod = "lc"
	// WeightedLeastConnection assigns more jobs to servers with fewer jobs and
	// relative to the real servers' weight (Ci/Wi).
	WeightedLeastConnection IPVSSchedulerMethod = "wlc"
	// LocalityBasedLeastConnection assigns jobs destined for the same IP address to the same server if
	// the server is not overloaded and available; otherwise assigns jobs to servers with fewer jobs,
	// and keeps it for future assignment.
	LocalityBasedLeastConnection IPVSSchedulerMethod = "lblc"
	// LocalityBasedLeastConnectionWithReplication assigns jobs destined for the same IP address to the
	// least-connection node in the server set for the IP address. If all the nodes in the server set are overloaded,
	// it picks up a node with fewer jobs in the cluster and adds it to the server set for the target.
	// If the server set has not been modified for the specified time, the most loaded node is removed from the server set,
	// in order to avoid a high degree of replication.
	LocalityBasedLeastConnectionWithReplication IPVSSchedulerMethod = "lblcr"
	// SourceHashing assigns jobs to servers through looking up a statically assigned hash table
	// by their source IP addresses.
	SourceHashing IPVSSchedulerMethod = "sh"
	// DestinationHashing assigns jobs to servers through looking up a statically assigned hash table
	// by their destination IP addresses.
	DestinationHashing IPVSSchedulerMethod = "dh"
	// ShortestExpectedDelay assigns an incoming job to the server with the shortest expected delay.
	// The expected delay that the job will experience is (Ci + 1) / Ui if sent to the ith server, in which
	// Ci is the number of jobs on the ith server and Ui is the fixed service rate (weight) of the ith server.
	ShortestExpectedDelay IPVSSchedulerMethod = "sed"
	// NeverQueue assigns an incoming job to an idle server if there is one, instead of waiting for a fast one;
	// if all the servers are busy, it adopts the ShortestExpectedDelay policy to assign the job.
	NeverQueue IPVSSchedulerMethod = "nq"
)

func (m *ProxyMode) Set(s string) error {
	*m = ProxyMode(s)
	return nil
}

func (m *ProxyMode) String() string {
	if m != nil {
		return string(*m)
	}
	return ""
}

func (m *ProxyMode) Type() string {
	return "ProxyMode"
}

type ConfigurationMap map[string]string

func (m *ConfigurationMap) String() string {
	pairs := []string{}
	for k, v := range *m {
		pairs = append(pairs, fmt.Sprintf("%s=%s", k, v))
	}
	sort.Strings(pairs)
	return strings.Join(pairs, ",")
}

func (m *ConfigurationMap) Set(value string) error {
	for _, s := range strings.Split(value, ",") {
		if len(s) == 0 {
			continue
		}
		arr := strings.SplitN(s, "=", 2)
		if len(arr) == 2 {
			(*m)[strings.TrimSpace(arr[0])] = strings.TrimSpace(arr[1])
		} else {
			(*m)[strings.TrimSpace(arr[0])] = ""
		}
	}
	return nil
}

func (*ConfigurationMap) Type() string {
	return "mapStringString"
}
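
[editor's note, not part of the diff] A small worked example of the ConfigurationMap flag semantics above: Set parses comma-separated key=value pairs (trimming whitespace, skipping empty segments, treating bare keys as empty values) and String re-serializes them sorted, so round-trips are stable. Only identifiers from the file above are used.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
)

func main() {
	m := kubeproxyconfig.ConfigurationMap{}
	// "flagOnly" has no '=' and becomes a key with an empty value;
	// the trailing empty segments are skipped.
	if err := m.Set("b=2, a = 1 ,flagOnly,,"); err != nil {
		panic(err)
	}
	fmt.Println(m.String()) // a=1,b=2,flagOnly= (keys sorted, whitespace trimmed)
}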

vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/BUILD (generated, vendored, normal file; 43 lines)
@@ -0,0 +1,43 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "defaults.go",
        "doc.go",
        "register.go",
        "types.go",
        "zz_generated.conversion.go",
        "zz_generated.deepcopy.go",
        "zz_generated.defaults.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1",
    deps = [
        "//pkg/kubelet/qos:go_default_library",
        "//pkg/master/ports:go_default_library",
        "//pkg/proxy/apis/kubeproxyconfig:go_default_library",
        "//pkg/util/pointer:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/defaults.go (generated, vendored, normal file; 119 lines)
@@ -0,0 +1,119 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	"fmt"
	"strings"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kruntime "k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/kubelet/qos"
	"k8s.io/kubernetes/pkg/master/ports"
	"k8s.io/kubernetes/pkg/util/pointer"
)

func addDefaultingFuncs(scheme *kruntime.Scheme) error {
	return RegisterDefaults(scheme)
}

func SetDefaults_KubeProxyConfiguration(obj *KubeProxyConfiguration) {
	if len(obj.BindAddress) == 0 {
		obj.BindAddress = "0.0.0.0"
	}
	if obj.HealthzBindAddress == "" {
		obj.HealthzBindAddress = fmt.Sprintf("0.0.0.0:%v", ports.ProxyHealthzPort)
	} else if !strings.Contains(obj.HealthzBindAddress, ":") {
		obj.HealthzBindAddress += fmt.Sprintf(":%v", ports.ProxyHealthzPort)
	}
	if obj.MetricsBindAddress == "" {
		obj.MetricsBindAddress = fmt.Sprintf("127.0.0.1:%v", ports.ProxyStatusPort)
	} else if !strings.Contains(obj.MetricsBindAddress, ":") {
		obj.MetricsBindAddress += fmt.Sprintf(":%v", ports.ProxyStatusPort)
	}
	if obj.OOMScoreAdj == nil {
		temp := int32(qos.KubeProxyOOMScoreAdj)
		obj.OOMScoreAdj = &temp
	}
	if obj.ResourceContainer == "" {
		obj.ResourceContainer = "/kube-proxy"
	}
	if obj.IPTables.SyncPeriod.Duration == 0 {
		obj.IPTables.SyncPeriod = metav1.Duration{Duration: 30 * time.Second}
	}
	if obj.IPVS.SyncPeriod.Duration == 0 {
		obj.IPVS.SyncPeriod = metav1.Duration{Duration: 30 * time.Second}
	}
	zero := metav1.Duration{}
	if obj.UDPIdleTimeout == zero {
		obj.UDPIdleTimeout = metav1.Duration{Duration: 250 * time.Millisecond}
	}
	// If ConntrackMax is set, respect it.
	if obj.Conntrack.Max == nil {
		// If ConntrackMax is *not* set, use per-core scaling.
		if obj.Conntrack.MaxPerCore == nil {
			obj.Conntrack.MaxPerCore = pointer.Int32Ptr(32 * 1024)
		}
		if obj.Conntrack.Min == nil {
			obj.Conntrack.Min = pointer.Int32Ptr(128 * 1024)
		}
	}
	if obj.IPTables.MasqueradeBit == nil {
		temp := int32(14)
		obj.IPTables.MasqueradeBit = &temp
	}
	if obj.Conntrack.TCPEstablishedTimeout == nil {
		obj.Conntrack.TCPEstablishedTimeout = &metav1.Duration{Duration: 24 * time.Hour} // 1 day (1/5 default)
	}
	if obj.Conntrack.TCPCloseWaitTimeout == nil {
		// See https://github.com/kubernetes/kubernetes/issues/32551.
		//
		// CLOSE_WAIT conntrack state occurs when the Linux kernel
		// sees a FIN from the remote server. Note: this is a half-close
		// condition that persists as long as the local side keeps the
		// socket open. The condition is rare as it is typical in most
		// protocols for both sides to issue a close; this typically
		// occurs when the local socket is lazily garbage collected.
		//
		// If the CLOSE_WAIT conntrack entry expires, then FINs from the
		// local socket will not be properly SNAT'd and will not reach the
		// remote server (if the connection was subject to SNAT). If the
		// remote timeouts for FIN_WAIT* states exceed the CLOSE_WAIT
		// timeout, then there will be an inconsistency in the state of
		// the connection and a new connection reusing the SNAT (src,
		// port) pair may be rejected by the remote side with RST. This
		// can cause new calls to connect(2) to return with ECONNREFUSED.
		//
		// We set CLOSE_WAIT to one hour by default to better match
		// typical server timeouts.
		obj.Conntrack.TCPCloseWaitTimeout = &metav1.Duration{Duration: 1 * time.Hour}
	}
	if obj.ConfigSyncPeriod.Duration == 0 {
		obj.ConfigSyncPeriod.Duration = 15 * time.Minute
	}

	if len(obj.ClientConnection.ContentType) == 0 {
		obj.ClientConnection.ContentType = "application/vnd.kubernetes.protobuf"
	}
	if obj.ClientConnection.QPS == 0.0 {
		obj.ClientConnection.QPS = 5.0
	}
	if obj.ClientConnection.Burst == 0 {
		obj.ClientConnection.Burst = 10
	}
}
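
[editor's note, not part of the diff] A sketch of what the defaulter above produces when handed a zero-value config; the expected values follow directly from the function body (ports.ProxyHealthzPort is 10256 in this release, an assumption worth verifying against the vendored ports package).

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
)

func main() {
	cfg := &v1alpha1.KubeProxyConfiguration{}
	v1alpha1.SetDefaults_KubeProxyConfiguration(cfg)
	fmt.Println(cfg.BindAddress)            // 0.0.0.0
	fmt.Println(cfg.HealthzBindAddress)     // 0.0.0.0:10256
	fmt.Println(cfg.ClientConnection.Burst) // 10
	fmt.Println(cfg.IPTables.SyncPeriod)    // {30s}
}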

vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/doc.go (generated, vendored, normal file; 22 lines)
@@ -0,0 +1,22 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:deepcopy-gen=package
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig
// +k8s:openapi-gen=true
// +k8s:defaulter-gen=TypeMeta

package v1alpha1 // import "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"

vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/register.go (generated, vendored, normal file; 50 lines)
@@ -0,0 +1,50 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// GroupName is the group name used in this package
const GroupName = "kubeproxy.config.k8s.io"

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}

var (
	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
	SchemeBuilder      runtime.SchemeBuilder
	localSchemeBuilder = &SchemeBuilder
	AddToScheme        = localSchemeBuilder.AddToScheme
)

func init() {
	// We only register manually written functions here. The registration of the
	// generated functions takes place in the generated files. The separation
	// makes the code compile even when the generated files are missing.
	localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}

func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&KubeProxyConfiguration{},
	)
	return nil
}

vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/types.go (generated, vendored, normal file; 161 lines)
@@ -0,0 +1,161 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ClientConnectionConfiguration contains details for constructing a client.
type ClientConnectionConfiguration struct {
	// kubeConfigFile is the path to a kubeconfig file.
	KubeConfigFile string `json:"kubeconfig"`
	// acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the
	// default value of 'application/json'. This field will control all connections to the server used by a particular
	// client.
	AcceptContentTypes string `json:"acceptContentTypes"`
	// contentType is the content type used when sending data to the server from this client.
	ContentType string `json:"contentType"`
	// qps controls the number of queries per second allowed for this connection.
	QPS float32 `json:"qps"`
	// burst allows extra queries to accumulate when a client is exceeding its rate.
	Burst int `json:"burst"`
}

// KubeProxyIPTablesConfiguration contains iptables-related configuration
// details for the Kubernetes proxy server.
type KubeProxyIPTablesConfiguration struct {
	// masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using
	// the pure iptables proxy mode. Values must be within the range [0, 31].
	MasqueradeBit *int32 `json:"masqueradeBit"`
	// masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.
	MasqueradeAll bool `json:"masqueradeAll"`
	// syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m',
	// '2h22m'). Must be greater than 0.
	SyncPeriod metav1.Duration `json:"syncPeriod"`
	// minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m',
	// '2h22m').
	MinSyncPeriod metav1.Duration `json:"minSyncPeriod"`
}

// KubeProxyIPVSConfiguration contains ipvs-related configuration
// details for the Kubernetes proxy server.
type KubeProxyIPVSConfiguration struct {
	// syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m',
	// '2h22m'). Must be greater than 0.
	SyncPeriod metav1.Duration `json:"syncPeriod"`
	// minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m',
	// '2h22m').
	MinSyncPeriod metav1.Duration `json:"minSyncPeriod"`
	// ipvs scheduler
	Scheduler string `json:"scheduler"`
}

// KubeProxyConntrackConfiguration contains conntrack settings for
// the Kubernetes proxy server.
type KubeProxyConntrackConfiguration struct {
	// max is the maximum number of NAT connections to track (0 to
	// leave as-is). This takes precedence over maxPerCore and min.
	Max *int32 `json:"max"`
	// maxPerCore is the maximum number of NAT connections to track
	// per CPU core (0 to leave the limit as-is and ignore min).
	MaxPerCore *int32 `json:"maxPerCore"`
	// min is the minimum value of connect-tracking records to allocate,
	// regardless of maxPerCore (set maxPerCore=0 to leave the limit as-is).
	Min *int32 `json:"min"`
	// tcpEstablishedTimeout is how long an idle TCP connection will be kept open
	// (e.g. '2s'). Must be greater than 0 to set.
	TCPEstablishedTimeout *metav1.Duration `json:"tcpEstablishedTimeout"`
	// tcpCloseWaitTimeout is how long an idle conntrack entry
	// in CLOSE_WAIT state will remain in the conntrack
	// table. (e.g. '60s'). Must be greater than 0 to set.
	TCPCloseWaitTimeout *metav1.Duration `json:"tcpCloseWaitTimeout"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// KubeProxyConfiguration contains everything necessary to configure the
// Kubernetes proxy server.
type KubeProxyConfiguration struct {
	metav1.TypeMeta `json:",inline"`

	// featureGates is a comma-separated list of key=value pairs that control
	// which alpha/beta features are enabled.
	//
	// TODO this really should be a map but that requires refactoring all
	// components to use config files because local-up-cluster.sh only supports
	// the --feature-gates flag right now, which is comma-separated key=value
	// pairs.
	FeatureGates string `json:"featureGates"`

	// bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0
	// for all interfaces)
	BindAddress string `json:"bindAddress"`
	// healthzBindAddress is the IP address and port for the health check server to serve on,
	// defaulting to 0.0.0.0:10256
	HealthzBindAddress string `json:"healthzBindAddress"`
	// metricsBindAddress is the IP address and port for the metrics server to serve on,
	// defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces)
	MetricsBindAddress string `json:"metricsBindAddress"`
	// enableProfiling enables profiling via web interface on /debug/pprof handler.
	// Profiling handlers will be handled by metrics server.
	EnableProfiling bool `json:"enableProfiling"`
	// clusterCIDR is the CIDR range of the pods in the cluster. It is used to
	// bridge traffic coming from outside of the cluster. If not provided,
	// no off-cluster bridging will be performed.
	ClusterCIDR string `json:"clusterCIDR"`
	// hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.
	HostnameOverride string `json:"hostnameOverride"`
	// clientConnection specifies the kubeconfig file and client connection settings for the proxy
	// server to use when communicating with the apiserver.
	ClientConnection ClientConnectionConfiguration `json:"clientConnection"`
	// iptables contains iptables-related configuration options.
	IPTables KubeProxyIPTablesConfiguration `json:"iptables"`
	// ipvs contains ipvs-related configuration options.
	IPVS KubeProxyIPVSConfiguration `json:"ipvs"`
	// oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within
	// the range [-1000, 1000]
	OOMScoreAdj *int32 `json:"oomScoreAdj"`
	// mode specifies which proxy mode to use.
	Mode ProxyMode `json:"mode"`
	// portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
	// in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
	PortRange string `json:"portRange"`
	// resourceContainer is the absolute name of the resource-only container to create and run
	// the Kube-proxy in (Default: /kube-proxy).
	ResourceContainer string `json:"resourceContainer"`
	// udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
	// Must be greater than 0. Only applicable for proxyMode=userspace.
	UDPIdleTimeout metav1.Duration `json:"udpTimeoutMilliseconds"`
	// conntrack contains conntrack-related configuration options.
	Conntrack KubeProxyConntrackConfiguration `json:"conntrack"`
	// configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater
	// than 0.
	ConfigSyncPeriod metav1.Duration `json:"configSyncPeriod"`
}

// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables'
// (newer, faster). If blank, use the best-available proxy (currently iptables, but may
// change in future versions). If the iptables proxy is selected, regardless of how, but
// the system's kernel or iptables versions are insufficient, this always falls back to the
// userspace proxy.
type ProxyMode string

const (
	ProxyModeUserspace ProxyMode = "userspace"
	ProxyModeIPTables  ProxyMode = "iptables"
)
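
[editor's note, not part of the diff] A quick sketch of how the json tags above shape the wire format; note that udpIdleTimeout serializes under the legacy key "udpTimeoutMilliseconds". Uses only encoding/json and the types above.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
)

func main() {
	cfg := v1alpha1.KubeProxyConfiguration{BindAddress: "0.0.0.0"}
	out, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	// Keys come from the struct tags: bindAddress, healthzBindAddress,
	// udpTimeoutMilliseconds, and so on; nil pointers marshal as null.
	fmt.Println(string(out))
}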

vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.conversion.go (generated, vendored, normal file; 224 lines)
@@ -0,0 +1,224 @@
// +build !ignore_autogenerated

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file was autogenerated by conversion-gen. Do not edit it manually!

package v1alpha1

import (
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	conversion "k8s.io/apimachinery/pkg/conversion"
	runtime "k8s.io/apimachinery/pkg/runtime"
	kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
	unsafe "unsafe"
)

func init() {
	localSchemeBuilder.Register(RegisterConversions)
}

// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(scheme *runtime.Scheme) error {
	return scheme.AddGeneratedConversionFuncs(
		Convert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration,
		Convert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration,
		Convert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration,
		Convert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration,
		Convert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration,
		Convert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration,
		Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration,
		Convert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration,
		Convert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration,
		Convert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration,
	)
}

func autoConvert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration(in *ClientConnectionConfiguration, out *kubeproxyconfig.ClientConnectionConfiguration, s conversion.Scope) error {
	out.KubeConfigFile = in.KubeConfigFile
	out.AcceptContentTypes = in.AcceptContentTypes
	out.ContentType = in.ContentType
	out.QPS = in.QPS
	out.Burst = int32(in.Burst)
	return nil
}

// Convert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration(in *ClientConnectionConfiguration, out *kubeproxyconfig.ClientConnectionConfiguration, s conversion.Scope) error {
	return autoConvert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration(in, out, s)
}

func autoConvert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(in *kubeproxyconfig.ClientConnectionConfiguration, out *ClientConnectionConfiguration, s conversion.Scope) error {
	out.KubeConfigFile = in.KubeConfigFile
	out.AcceptContentTypes = in.AcceptContentTypes
	out.ContentType = in.ContentType
	out.QPS = in.QPS
	out.Burst = int(in.Burst)
	return nil
}

// Convert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration is an autogenerated conversion function.
func Convert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(in *kubeproxyconfig.ClientConnectionConfiguration, out *ClientConnectionConfiguration, s conversion.Scope) error {
	return autoConvert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(in, out, s)
}

func autoConvert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *kubeproxyconfig.KubeProxyConfiguration, s conversion.Scope) error {
	out.FeatureGates = in.FeatureGates
	out.BindAddress = in.BindAddress
	out.HealthzBindAddress = in.HealthzBindAddress
	out.MetricsBindAddress = in.MetricsBindAddress
	out.EnableProfiling = in.EnableProfiling
	out.ClusterCIDR = in.ClusterCIDR
	out.HostnameOverride = in.HostnameOverride
	if err := Convert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil {
		return err
	}
	if err := Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration(&in.IPTables, &out.IPTables, s); err != nil {
		return err
	}
	if err := Convert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration(&in.IPVS, &out.IPVS, s); err != nil {
		return err
	}
	out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj))
	out.Mode = kubeproxyconfig.ProxyMode(in.Mode)
	out.PortRange = in.PortRange
	out.ResourceContainer = in.ResourceContainer
	out.UDPIdleTimeout = in.UDPIdleTimeout
	if err := Convert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil {
		return err
	}
	out.ConfigSyncPeriod = in.ConfigSyncPeriod
	return nil
}

// Convert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *kubeproxyconfig.KubeProxyConfiguration, s conversion.Scope) error {
	return autoConvert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration(in, out, s)
}

func autoConvert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *kubeproxyconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error {
	out.FeatureGates = in.FeatureGates
	out.BindAddress = in.BindAddress
	out.HealthzBindAddress = in.HealthzBindAddress
	out.MetricsBindAddress = in.MetricsBindAddress
	out.EnableProfiling = in.EnableProfiling
	out.ClusterCIDR = in.ClusterCIDR
	out.HostnameOverride = in.HostnameOverride
	if err := Convert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil {
		return err
	}
	if err := Convert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(&in.IPTables, &out.IPTables, s); err != nil {
		return err
	}
	if err := Convert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(&in.IPVS, &out.IPVS, s); err != nil {
		return err
	}
	out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj))
	out.Mode = ProxyMode(in.Mode)
	out.PortRange = in.PortRange
	out.ResourceContainer = in.ResourceContainer
	out.UDPIdleTimeout = in.UDPIdleTimeout
	if err := Convert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil {
		return err
	}
	out.ConfigSyncPeriod = in.ConfigSyncPeriod
	return nil
}

// Convert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration is an autogenerated conversion function.
func Convert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *kubeproxyconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error {
	return autoConvert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in, out, s)
}

func autoConvert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration(in *KubeProxyConntrackConfiguration, out *kubeproxyconfig.KubeProxyConntrackConfiguration, s conversion.Scope) error {
	out.Max = (*int32)(unsafe.Pointer(in.Max))
	out.MaxPerCore = (*int32)(unsafe.Pointer(in.MaxPerCore))
	out.Min = (*int32)(unsafe.Pointer(in.Min))
	out.TCPEstablishedTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPEstablishedTimeout))
	out.TCPCloseWaitTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPCloseWaitTimeout))
	return nil
}

// Convert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration(in *KubeProxyConntrackConfiguration, out *kubeproxyconfig.KubeProxyConntrackConfiguration, s conversion.Scope) error {
	return autoConvert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration(in, out, s)
}

func autoConvert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in *kubeproxyconfig.KubeProxyConntrackConfiguration, out *KubeProxyConntrackConfiguration, s conversion.Scope) error {
	out.Max = (*int32)(unsafe.Pointer(in.Max))
	out.MaxPerCore = (*int32)(unsafe.Pointer(in.MaxPerCore))
	out.Min = (*int32)(unsafe.Pointer(in.Min))
	out.TCPEstablishedTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPEstablishedTimeout))
	out.TCPCloseWaitTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPCloseWaitTimeout))
	return nil
}

// Convert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration is an autogenerated conversion function.
func Convert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in *kubeproxyconfig.KubeProxyConntrackConfiguration, out *KubeProxyConntrackConfiguration, s conversion.Scope) error {
	return autoConvert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in, out, s)
}

func autoConvert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration(in *KubeProxyIPTablesConfiguration, out *kubeproxyconfig.KubeProxyIPTablesConfiguration, s conversion.Scope) error {
	out.MasqueradeBit = (*int32)(unsafe.Pointer(in.MasqueradeBit))
	out.MasqueradeAll = in.MasqueradeAll
	out.SyncPeriod = in.SyncPeriod
	out.MinSyncPeriod = in.MinSyncPeriod
	return nil
}

// Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration(in *KubeProxyIPTablesConfiguration, out *kubeproxyconfig.KubeProxyIPTablesConfiguration, s conversion.Scope) error {
	return autoConvert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration(in, out, s)
}

func autoConvert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in *kubeproxyconfig.KubeProxyIPTablesConfiguration, out *KubeProxyIPTablesConfiguration, s conversion.Scope) error {
	out.MasqueradeBit = (*int32)(unsafe.Pointer(in.MasqueradeBit))
	out.MasqueradeAll = in.MasqueradeAll
	out.SyncPeriod = in.SyncPeriod
	out.MinSyncPeriod = in.MinSyncPeriod
	return nil
}

// Convert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration is an autogenerated conversion function.
func Convert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in *kubeproxyconfig.KubeProxyIPTablesConfiguration, out *KubeProxyIPTablesConfiguration, s conversion.Scope) error {
	return autoConvert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in, out, s)
}

func autoConvert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration(in *KubeProxyIPVSConfiguration, out *kubeproxyconfig.KubeProxyIPVSConfiguration, s conversion.Scope) error {
	out.SyncPeriod = in.SyncPeriod
	out.MinSyncPeriod = in.MinSyncPeriod
	out.Scheduler = in.Scheduler
	return nil
}

// Convert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration(in *KubeProxyIPVSConfiguration, out *kubeproxyconfig.KubeProxyIPVSConfiguration, s conversion.Scope) error {
	return autoConvert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration(in, out, s)
}

func autoConvert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in *kubeproxyconfig.KubeProxyIPVSConfiguration, out *KubeProxyIPVSConfiguration, s conversion.Scope) error {
	out.SyncPeriod = in.SyncPeriod
	out.MinSyncPeriod = in.MinSyncPeriod
	out.Scheduler = in.Scheduler
	return nil
}

// Convert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration is an autogenerated conversion function.
func Convert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in *kubeproxyconfig.KubeProxyIPVSConfiguration, out *KubeProxyIPVSConfiguration, s conversion.Scope) error {
	return autoConvert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in, out, s)
}
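
[editor's note, not part of the diff] An illustrative round trip through the generated conversion functions above. Passing nil for the conversion.Scope is an assumption: it holds for these particular functions because they only forward the scope and never dereference it, but a real caller would go through a Scheme.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
	"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
)

func main() {
	in := &v1alpha1.KubeProxyConfiguration{BindAddress: "10.0.0.1", Mode: "ipvs"}
	var internal kubeproxyconfig.KubeProxyConfiguration
	// v1alpha1 -> internal; nil Scope is tolerated here (see note above).
	if err := v1alpha1.Convert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration(in, &internal, nil); err != nil {
		panic(err)
	}
	var back v1alpha1.KubeProxyConfiguration
	// internal -> v1alpha1, completing the round trip.
	if err := v1alpha1.Convert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(&internal, &back, nil); err != nil {
		panic(err)
	}
	fmt.Println(back.BindAddress, back.Mode) // 10.0.0.1 ipvs
}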

vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.deepcopy.go (generated, vendored, normal file; 189 lines)
@@ -0,0 +1,189 @@
// +build !ignore_autogenerated

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file was autogenerated by deepcopy-gen. Do not edit it manually!

package v1alpha1

import (
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClientConnectionConfiguration) DeepCopyInto(out *ClientConnectionConfiguration) {
    *out = *in
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionConfiguration.
func (in *ClientConnectionConfiguration) DeepCopy() *ClientConnectionConfiguration {
    if in == nil {
        return nil
    }
    out := new(ClientConnectionConfiguration)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    out.ClientConnection = in.ClientConnection
    in.IPTables.DeepCopyInto(&out.IPTables)
    out.IPVS = in.IPVS
    if in.OOMScoreAdj != nil {
        in, out := &in.OOMScoreAdj, &out.OOMScoreAdj
        if *in == nil {
            *out = nil
        } else {
            *out = new(int32)
            **out = **in
        }
    }
    out.UDPIdleTimeout = in.UDPIdleTimeout
    in.Conntrack.DeepCopyInto(&out.Conntrack)
    out.ConfigSyncPeriod = in.ConfigSyncPeriod
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfiguration.
func (in *KubeProxyConfiguration) DeepCopy() *KubeProxyConfiguration {
    if in == nil {
        return nil
    }
    out := new(KubeProxyConfiguration)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KubeProxyConfiguration) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    } else {
        return nil
    }
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyConntrackConfiguration) DeepCopyInto(out *KubeProxyConntrackConfiguration) {
    *out = *in
    if in.Max != nil {
        in, out := &in.Max, &out.Max
        if *in == nil {
            *out = nil
        } else {
            *out = new(int32)
            **out = **in
        }
    }
    if in.MaxPerCore != nil {
        in, out := &in.MaxPerCore, &out.MaxPerCore
        if *in == nil {
            *out = nil
        } else {
            *out = new(int32)
            **out = **in
        }
    }
    if in.Min != nil {
        in, out := &in.Min, &out.Min
        if *in == nil {
            *out = nil
        } else {
            *out = new(int32)
            **out = **in
        }
    }
    if in.TCPEstablishedTimeout != nil {
        in, out := &in.TCPEstablishedTimeout, &out.TCPEstablishedTimeout
        if *in == nil {
            *out = nil
        } else {
            *out = new(v1.Duration)
            **out = **in
        }
    }
    if in.TCPCloseWaitTimeout != nil {
        in, out := &in.TCPCloseWaitTimeout, &out.TCPCloseWaitTimeout
        if *in == nil {
            *out = nil
        } else {
            *out = new(v1.Duration)
            **out = **in
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConntrackConfiguration.
func (in *KubeProxyConntrackConfiguration) DeepCopy() *KubeProxyConntrackConfiguration {
    if in == nil {
        return nil
    }
    out := new(KubeProxyConntrackConfiguration)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyIPTablesConfiguration) DeepCopyInto(out *KubeProxyIPTablesConfiguration) {
    *out = *in
    if in.MasqueradeBit != nil {
        in, out := &in.MasqueradeBit, &out.MasqueradeBit
        if *in == nil {
            *out = nil
        } else {
            *out = new(int32)
            **out = **in
        }
    }
    out.SyncPeriod = in.SyncPeriod
    out.MinSyncPeriod = in.MinSyncPeriod
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPTablesConfiguration.
func (in *KubeProxyIPTablesConfiguration) DeepCopy() *KubeProxyIPTablesConfiguration {
    if in == nil {
        return nil
    }
    out := new(KubeProxyIPTablesConfiguration)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyIPVSConfiguration) DeepCopyInto(out *KubeProxyIPVSConfiguration) {
    *out = *in
    out.SyncPeriod = in.SyncPeriod
    out.MinSyncPeriod = in.MinSyncPeriod
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPVSConfiguration.
func (in *KubeProxyIPVSConfiguration) DeepCopy() *KubeProxyIPVSConfiguration {
    if in == nil {
        return nil
    }
    out := new(KubeProxyIPVSConfiguration)
    in.DeepCopyInto(out)
    return out
}
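A note on usage: the point of these generated deep copies is that pointer fields get fresh allocations, so mutating a copy never aliases the original. A minimal sketch (hypothetical snippet, not part of the vendored tree):

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
)

func main() {
    orig := &v1alpha1.KubeProxyIPTablesConfiguration{MasqueradeBit: new(int32)}
    *orig.MasqueradeBit = 14

    copied := orig.DeepCopy()
    *copied.MasqueradeBit = 15

    // DeepCopyInto allocated a fresh int32 for the copy, so the two values
    // diverge instead of aliasing: prints "14 15".
    fmt.Println(*orig.MasqueradeBit, *copied.MasqueradeBit)
}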
37
vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.defaults.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
// +build !ignore_autogenerated

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file was autogenerated by defaulter-gen. Do not edit it manually!

package v1alpha1

import (
    runtime "k8s.io/apimachinery/pkg/runtime"
)

// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
    scheme.AddTypeDefaultingFunc(&KubeProxyConfiguration{}, func(obj interface{}) { SetObjectDefaults_KubeProxyConfiguration(obj.(*KubeProxyConfiguration)) })
    return nil
}

func SetObjectDefaults_KubeProxyConfiguration(in *KubeProxyConfiguration) {
    SetDefaults_KubeProxyConfiguration(in)
}
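A note on usage: RegisterDefaults wires SetObjectDefaults_KubeProxyConfiguration into a runtime.Scheme, after which scheme.Default applies it to any *KubeProxyConfiguration. A minimal sketch (hypothetical snippet, not part of the vendored tree):

package main

import (
    runtime "k8s.io/apimachinery/pkg/runtime"

    "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
)

func main() {
    scheme := runtime.NewScheme()
    if err := v1alpha1.RegisterDefaults(scheme); err != nil {
        panic(err)
    }

    cfg := &v1alpha1.KubeProxyConfiguration{}
    // Default dispatches to the registered type-defaulting func, which calls
    // SetObjectDefaults_KubeProxyConfiguration and, through it,
    // SetDefaults_KubeProxyConfiguration (defined elsewhere in this package).
    scheme.Default(cfg)
}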
45
vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation/BUILD
generated
vendored
Normal file
@ -0,0 +1,45 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = ["validation.go"],
    importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation",
    deps = [
        "//pkg/apis/core/validation:go_default_library",
        "//pkg/proxy/apis/kubeproxyconfig:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)

go_test(
    name = "go_default_test",
    srcs = ["validation_test.go"],
    importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation",
    library = ":go_default_library",
    deps = [
        "//pkg/proxy/apis/kubeproxyconfig:go_default_library",
        "//pkg/util/pointer:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
    ],
)
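A note on usage: from a Kubernetes checkout these targets build and test with Bazel in the usual way (commands are illustrative, not taken from this repository):

    bazel build //pkg/proxy/apis/kubeproxyconfig/validation:go_default_library
    bazel test //pkg/proxy/apis/kubeproxyconfig/validation:go_default_test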
238
vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation/validation.go
generated
vendored
Normal file
@ -0,0 +1,238 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package validation

import (
    "fmt"
    "net"
    "runtime"
    "strconv"
    "strings"

    utilnet "k8s.io/apimachinery/pkg/util/net"
    "k8s.io/apimachinery/pkg/util/validation/field"
    apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
    "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
)

// Validate validates the configuration of kube-proxy
func Validate(config *kubeproxyconfig.KubeProxyConfiguration) field.ErrorList {
    allErrs := field.ErrorList{}

    newPath := field.NewPath("KubeProxyConfiguration")

    allErrs = append(allErrs, validateKubeProxyIPTablesConfiguration(config.IPTables, newPath.Child("KubeProxyIPTablesConfiguration"))...)
    allErrs = append(allErrs, validateKubeProxyIPVSConfiguration(config.IPVS, newPath.Child("KubeProxyIPVSConfiguration"))...)
    allErrs = append(allErrs, validateKubeProxyConntrackConfiguration(config.Conntrack, newPath.Child("KubeProxyConntrackConfiguration"))...)
    allErrs = append(allErrs, validateProxyMode(config.Mode, newPath.Child("Mode"))...)
    allErrs = append(allErrs, validateClientConnectionConfiguration(config.ClientConnection, newPath.Child("ClientConnection"))...)

    if config.OOMScoreAdj != nil && (*config.OOMScoreAdj < -1000 || *config.OOMScoreAdj > 1000) {
        allErrs = append(allErrs, field.Invalid(newPath.Child("OOMScoreAdj"), *config.OOMScoreAdj, "must be within the range [-1000, 1000]"))
    }

    if config.UDPIdleTimeout.Duration <= 0 {
        allErrs = append(allErrs, field.Invalid(newPath.Child("UDPIdleTimeout"), config.UDPIdleTimeout, "must be greater than 0"))
    }

    if config.ConfigSyncPeriod.Duration <= 0 {
        allErrs = append(allErrs, field.Invalid(newPath.Child("ConfigSyncPeriod"), config.ConfigSyncPeriod, "must be greater than 0"))
    }

    if net.ParseIP(config.BindAddress) == nil {
        allErrs = append(allErrs, field.Invalid(newPath.Child("BindAddress"), config.BindAddress, "not a valid textual representation of an IP address"))
    }

    allErrs = append(allErrs, validateHostPort(config.HealthzBindAddress, newPath.Child("HealthzBindAddress"))...)
    allErrs = append(allErrs, validateHostPort(config.MetricsBindAddress, newPath.Child("MetricsBindAddress"))...)

    if config.ClusterCIDR != "" {
        if _, _, err := net.ParseCIDR(config.ClusterCIDR); err != nil {
            allErrs = append(allErrs, field.Invalid(newPath.Child("ClusterCIDR"), config.ClusterCIDR, "must be a valid CIDR block (e.g. 10.100.0.0/16)"))
        }
    }

    if _, err := utilnet.ParsePortRange(config.PortRange); err != nil {
        allErrs = append(allErrs, field.Invalid(newPath.Child("PortRange"), config.PortRange, "must be a valid port range (e.g. 300-2000)"))
    }

    return allErrs
}

func validateKubeProxyIPTablesConfiguration(config kubeproxyconfig.KubeProxyIPTablesConfiguration, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}

    if config.MasqueradeBit != nil && (*config.MasqueradeBit < 0 || *config.MasqueradeBit > 31) {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("MasqueradeBit"), config.MasqueradeBit, "must be within the range [0, 31]"))
    }

    if config.SyncPeriod.Duration <= 0 {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("SyncPeriod"), config.SyncPeriod, "must be greater than 0"))
    }

    if config.MinSyncPeriod.Duration < 0 {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("MinSyncPeriod"), config.MinSyncPeriod, "must be greater than or equal to 0"))
    }

    if config.MinSyncPeriod.Duration > config.SyncPeriod.Duration {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("SyncPeriod"), config.MinSyncPeriod, fmt.Sprintf("must be greater than or equal to %s", fldPath.Child("MinSyncPeriod").String())))
    }

    return allErrs
}

func validateKubeProxyIPVSConfiguration(config kubeproxyconfig.KubeProxyIPVSConfiguration, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}

    if config.SyncPeriod.Duration <= 0 {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("SyncPeriod"), config.SyncPeriod, "must be greater than 0"))
    }

    if config.MinSyncPeriod.Duration < 0 {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("MinSyncPeriod"), config.MinSyncPeriod, "must be greater than or equal to 0"))
    }

    if config.MinSyncPeriod.Duration > config.SyncPeriod.Duration {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("SyncPeriod"), config.MinSyncPeriod, fmt.Sprintf("must be greater than or equal to %s", fldPath.Child("MinSyncPeriod").String())))
    }

    allErrs = append(allErrs, validateIPVSSchedulerMethod(kubeproxyconfig.IPVSSchedulerMethod(config.Scheduler), fldPath.Child("Scheduler"))...)

    return allErrs
}

func validateKubeProxyConntrackConfiguration(config kubeproxyconfig.KubeProxyConntrackConfiguration, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}

    if config.Max != nil && *config.Max < 0 {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("Max"), config.Max, "must be greater than or equal to 0"))
    }

    if config.MaxPerCore != nil && *config.MaxPerCore < 0 {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("MaxPerCore"), config.MaxPerCore, "must be greater than or equal to 0"))
    }

    if config.Min != nil && *config.Min < 0 {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("Min"), config.Min, "must be greater than or equal to 0"))
    }

    if config.TCPEstablishedTimeout.Duration < 0 {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("TCPEstablishedTimeout"), config.TCPEstablishedTimeout, "must be greater than or equal to 0"))
    }

    if config.TCPCloseWaitTimeout.Duration < 0 {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("TCPCloseWaitTimeout"), config.TCPCloseWaitTimeout, "must be greater than or equal to 0"))
    }

    return allErrs
}

func validateProxyMode(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList {
    if runtime.GOOS == "windows" {
        return validateProxyModeWindows(mode, fldPath)
    }

    return validateProxyModeLinux(mode, fldPath)
}

func validateProxyModeLinux(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}

    switch mode {
    case kubeproxyconfig.ProxyModeUserspace:
    case kubeproxyconfig.ProxyModeIPTables:
    case kubeproxyconfig.ProxyModeIPVS:
    case "":
    default:
        modes := []string{string(kubeproxyconfig.ProxyModeUserspace), string(kubeproxyconfig.ProxyModeIPTables), string(kubeproxyconfig.ProxyModeIPVS)}
        errMsg := fmt.Sprintf("must be %s or blank (blank means the best-available proxy [currently iptables])", strings.Join(modes, ","))
        allErrs = append(allErrs, field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg))
    }
    return allErrs
}

func validateProxyModeWindows(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}

    switch mode {
    case kubeproxyconfig.ProxyModeUserspace:
    case kubeproxyconfig.ProxyModeKernelspace:
    default:
        modes := []string{string(kubeproxyconfig.ProxyModeUserspace), string(kubeproxyconfig.ProxyModeKernelspace)}
        errMsg := fmt.Sprintf("must be %s or blank (blank means the most-available proxy [currently userspace])", strings.Join(modes, ","))
        allErrs = append(allErrs, field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg))
    }
    return allErrs
}

func validateClientConnectionConfiguration(config kubeproxyconfig.ClientConnectionConfiguration, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(config.Burst), fldPath.Child("Burst"))...)
    return allErrs
}

func validateHostPort(input string, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}

    hostIP, port, err := net.SplitHostPort(input)
    if err != nil {
        allErrs = append(allErrs, field.Invalid(fldPath, input, "must be IP:port"))
        return allErrs
    }

    if ip := net.ParseIP(hostIP); ip == nil {
        allErrs = append(allErrs, field.Invalid(fldPath, hostIP, "must be a valid IP"))
    }

    if p, err := strconv.Atoi(port); err != nil {
        allErrs = append(allErrs, field.Invalid(fldPath, port, "must be a valid port"))
    } else if p < 1 || p > 65535 {
        allErrs = append(allErrs, field.Invalid(fldPath, port, "must be a valid port"))
    }

    return allErrs
}

func validateIPVSSchedulerMethod(scheduler kubeproxyconfig.IPVSSchedulerMethod, fldPath *field.Path) field.ErrorList {
    supportedMethod := []kubeproxyconfig.IPVSSchedulerMethod{
        kubeproxyconfig.RoundRobin,
        kubeproxyconfig.WeightedRoundRobin,
        kubeproxyconfig.LeastConnection,
        kubeproxyconfig.WeightedLeastConnection,
        kubeproxyconfig.LocalityBasedLeastConnection,
        kubeproxyconfig.LocalityBasedLeastConnectionWithReplication,
        kubeproxyconfig.SourceHashing,
        kubeproxyconfig.DestinationHashing,
        kubeproxyconfig.ShortestExpectedDelay,
        kubeproxyconfig.NeverQueue,
        "",
    }
    allErrs := field.ErrorList{}
    var found bool
    for i := range supportedMethod {
        if scheduler == supportedMethod[i] {
            found = true
            break
        }
    }
    // Not found
    if !found {
        errMsg := fmt.Sprintf("must be in %v, blank means the default algorithm method (currently rr)", supportedMethod)
        allErrs = append(allErrs, field.Invalid(fldPath.Child("Scheduler"), string(scheduler), errMsg))
    }
    return allErrs
}
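A note on usage: Validate aggregates every field.Error rather than failing fast, so a caller can surface all configuration problems at once. A minimal sketch on Linux (hypothetical snippet mirroring the success case in the tests below; the conntrack timeouts must be non-nil because the validators dereference them):

package main

import (
    "fmt"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
    "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation"
)

func main() {
    cfg := &kubeproxyconfig.KubeProxyConfiguration{
        BindAddress:        "192.168.59.103",
        HealthzBindAddress: "0.0.0.0:10256",
        MetricsBindAddress: "127.0.0.1:10249",
        UDPIdleTimeout:     metav1.Duration{Duration: time.Second},
        ConfigSyncPeriod:   metav1.Duration{Duration: 15 * time.Minute},
        IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
            SyncPeriod: metav1.Duration{Duration: 30 * time.Second},
        },
        IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{
            SyncPeriod: metav1.Duration{Duration: 30 * time.Second},
        },
        Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
            TCPEstablishedTimeout: &metav1.Duration{Duration: time.Hour},
            TCPCloseWaitTimeout:   &metav1.Duration{Duration: time.Hour},
        },
    }
    if errs := validation.Validate(cfg); len(errs) > 0 {
        // Every failing field is reported, joined into one aggregate error.
        fmt.Println(errs.ToAggregate())
        return
    }
    fmt.Println("config is valid")
}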
653
vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation/validation_test.go
generated
vendored
Normal file
@ -0,0 +1,653 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package validation

import (
    "fmt"
    "runtime"
    "strings"
    "testing"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/validation/field"
    "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
    "k8s.io/kubernetes/pkg/util/pointer"
)

func TestValidateKubeProxyConfiguration(t *testing.T) {
    successCases := []kubeproxyconfig.KubeProxyConfiguration{
        {
            BindAddress:        "192.168.59.103",
            HealthzBindAddress: "0.0.0.0:10256",
            MetricsBindAddress: "127.0.0.1:10249",
            ClusterCIDR:        "192.168.59.0/24",
            UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
            ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
            IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
                MasqueradeAll: true,
                SyncPeriod:    metav1.Duration{Duration: 5 * time.Second},
                MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
            },
            IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{
                SyncPeriod:    metav1.Duration{Duration: 10 * time.Second},
                MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
            },
            Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
                Max:                   pointer.Int32Ptr(2),
                MaxPerCore:            pointer.Int32Ptr(1),
                Min:                   pointer.Int32Ptr(1),
                TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
                TCPCloseWaitTimeout:   &metav1.Duration{Duration: 5 * time.Second},
            },
        },
    }

    for _, successCase := range successCases {
        if errs := Validate(&successCase); len(errs) != 0 {
            t.Errorf("expected success: %v", errs)
        }
    }

    errorCases := []struct {
        config kubeproxyconfig.KubeProxyConfiguration
        msg    string
    }{
        {
            config: kubeproxyconfig.KubeProxyConfiguration{
                // only BindAddress is invalid
                BindAddress:        "10.10.12.11:2000",
                HealthzBindAddress: "0.0.0.0:10256",
                MetricsBindAddress: "127.0.0.1:10249",
                ClusterCIDR:        "192.168.59.0/24",
                UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
                ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
                IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
                    MasqueradeAll: true,
                    SyncPeriod:    metav1.Duration{Duration: 5 * time.Second},
                    MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
                },
                IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{
                    SyncPeriod:    metav1.Duration{Duration: 10 * time.Second},
                    MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
                },
                Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
                    Max:                   pointer.Int32Ptr(2),
                    MaxPerCore:            pointer.Int32Ptr(1),
                    Min:                   pointer.Int32Ptr(1),
                    TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
                    TCPCloseWaitTimeout:   &metav1.Duration{Duration: 5 * time.Second},
                },
            },
            msg: "not a valid textual representation of an IP address",
        },
        {
            config: kubeproxyconfig.KubeProxyConfiguration{
                BindAddress: "10.10.12.11",
                // only HealthzBindAddress is invalid
                HealthzBindAddress: "0.0.0.0",
                MetricsBindAddress: "127.0.0.1:10249",
                ClusterCIDR:        "192.168.59.0/24",
                UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
                ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
                IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
                    MasqueradeAll: true,
                    SyncPeriod:    metav1.Duration{Duration: 5 * time.Second},
                    MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
                },
                IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{
                    SyncPeriod:    metav1.Duration{Duration: 10 * time.Second},
                    MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
                },
                Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
                    Max:                   pointer.Int32Ptr(2),
                    MaxPerCore:            pointer.Int32Ptr(1),
                    Min:                   pointer.Int32Ptr(1),
                    TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
                    TCPCloseWaitTimeout:   &metav1.Duration{Duration: 5 * time.Second},
                },
            },
            msg: "must be IP:port",
        },
        {
            config: kubeproxyconfig.KubeProxyConfiguration{
                BindAddress:        "10.10.12.11",
                HealthzBindAddress: "0.0.0.0:12345",
                // only MetricsBindAddress is invalid
                MetricsBindAddress: "127.0.0.1",
                ClusterCIDR:        "192.168.59.0/24",
                UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
                ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
                IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
                    MasqueradeAll: true,
                    SyncPeriod:    metav1.Duration{Duration: 5 * time.Second},
                    MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
                },
                IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{
                    SyncPeriod:    metav1.Duration{Duration: 10 * time.Second},
                    MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
                },
                Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
                    Max:                   pointer.Int32Ptr(2),
                    MaxPerCore:            pointer.Int32Ptr(1),
                    Min:                   pointer.Int32Ptr(1),
                    TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
                    TCPCloseWaitTimeout:   &metav1.Duration{Duration: 5 * time.Second},
                },
            },
            msg: "must be IP:port",
        },
        {
            config: kubeproxyconfig.KubeProxyConfiguration{
                BindAddress:        "10.10.12.11",
                HealthzBindAddress: "0.0.0.0:12345",
                MetricsBindAddress: "127.0.0.1:10249",
                // only ClusterCIDR is invalid
                ClusterCIDR:      "192.168.59.0",
                UDPIdleTimeout:   metav1.Duration{Duration: 1 * time.Second},
                ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second},
                IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
                    MasqueradeAll: true,
                    SyncPeriod:    metav1.Duration{Duration: 5 * time.Second},
                    MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
                },
                IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{
                    SyncPeriod:    metav1.Duration{Duration: 10 * time.Second},
                    MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
                },
                Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
                    Max:                   pointer.Int32Ptr(2),
                    MaxPerCore:            pointer.Int32Ptr(1),
                    Min:                   pointer.Int32Ptr(1),
                    TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
                    TCPCloseWaitTimeout:   &metav1.Duration{Duration: 5 * time.Second},
                },
            },
            msg: "must be a valid CIDR block (e.g. 10.100.0.0/16)",
        },
        {
            config: kubeproxyconfig.KubeProxyConfiguration{
                BindAddress:        "10.10.12.11",
                HealthzBindAddress: "0.0.0.0:12345",
                MetricsBindAddress: "127.0.0.1:10249",
                ClusterCIDR:        "192.168.59.0/24",
                // only UDPIdleTimeout is invalid
                UDPIdleTimeout:   metav1.Duration{Duration: -1 * time.Second},
                ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second},
                IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
                    MasqueradeAll: true,
                    SyncPeriod:    metav1.Duration{Duration: 5 * time.Second},
                    MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
                },
                IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{
                    SyncPeriod:    metav1.Duration{Duration: 10 * time.Second},
                    MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
                },
                Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
                    Max:                   pointer.Int32Ptr(2),
                    MaxPerCore:            pointer.Int32Ptr(1),
                    Min:                   pointer.Int32Ptr(1),
                    TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
                    TCPCloseWaitTimeout:   &metav1.Duration{Duration: 5 * time.Second},
                },
            },
            msg: "must be greater than 0",
        },
        {
            config: kubeproxyconfig.KubeProxyConfiguration{
                BindAddress:        "10.10.12.11",
                HealthzBindAddress: "0.0.0.0:12345",
                MetricsBindAddress: "127.0.0.1:10249",
                ClusterCIDR:        "192.168.59.0/24",
                UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
                // only ConfigSyncPeriod is invalid
                ConfigSyncPeriod: metav1.Duration{Duration: -1 * time.Second},
                IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
                    MasqueradeAll: true,
                    SyncPeriod:    metav1.Duration{Duration: 5 * time.Second},
                    MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
                },
                IPVS: kubeproxyconfig.KubeProxyIPVSConfiguration{
                    SyncPeriod:    metav1.Duration{Duration: 10 * time.Second},
                    MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
                },
                Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
                    Max:                   pointer.Int32Ptr(2),
                    MaxPerCore:            pointer.Int32Ptr(1),
                    Min:                   pointer.Int32Ptr(1),
                    TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
                    TCPCloseWaitTimeout:   &metav1.Duration{Duration: 5 * time.Second},
                },
            },
            msg: "must be greater than 0",
        },
    }

    for _, errorCase := range errorCases {
        if errs := Validate(&errorCase.config); len(errs) == 0 {
            t.Errorf("expected failure for %s", errorCase.msg)
        } else if !strings.Contains(errs[0].Error(), errorCase.msg) {
            t.Errorf("unexpected error: %v, expected: %s", errs[0], errorCase.msg)
        }
    }
}

func TestValidateKubeProxyIPTablesConfiguration(t *testing.T) {
    valid := int32(5)
    successCases := []kubeproxyconfig.KubeProxyIPTablesConfiguration{
        {
            MasqueradeAll: true,
            SyncPeriod:    metav1.Duration{Duration: 5 * time.Second},
            MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
        },
        {
            MasqueradeBit: &valid,
            MasqueradeAll: true,
            SyncPeriod:    metav1.Duration{Duration: 5 * time.Second},
            MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
        },
    }
    newPath := field.NewPath("KubeProxyConfiguration")
    for _, successCase := range successCases {
        if errs := validateKubeProxyIPTablesConfiguration(successCase, newPath.Child("KubeProxyIPTablesConfiguration")); len(errs) != 0 {
            t.Errorf("expected success: %v", errs)
        }
    }

    invalid := int32(-10)
    errorCases := []struct {
        config kubeproxyconfig.KubeProxyIPTablesConfiguration
        msg    string
    }{
        {
            config: kubeproxyconfig.KubeProxyIPTablesConfiguration{
                MasqueradeAll: true,
                SyncPeriod:    metav1.Duration{Duration: -5 * time.Second},
                MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
            },
            msg: "must be greater than 0",
        },
        {
            config: kubeproxyconfig.KubeProxyIPTablesConfiguration{
                MasqueradeBit: &valid,
                MasqueradeAll: true,
                SyncPeriod:    metav1.Duration{Duration: 5 * time.Second},
                MinSyncPeriod: metav1.Duration{Duration: -1 * time.Second},
            },
            msg: "must be greater than or equal to 0",
        },
        {
            config: kubeproxyconfig.KubeProxyIPTablesConfiguration{
                MasqueradeBit: &invalid,
                MasqueradeAll: true,
                SyncPeriod:    metav1.Duration{Duration: 5 * time.Second},
                MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
            },
            msg: "must be within the range [0, 31]",
        },
        // SyncPeriod must be >= MinSyncPeriod
        {
            config: kubeproxyconfig.KubeProxyIPTablesConfiguration{
                MasqueradeBit: &valid,
                MasqueradeAll: true,
                SyncPeriod:    metav1.Duration{Duration: 1 * time.Second},
                MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
            },
            msg: fmt.Sprintf("must be greater than or equal to %s", newPath.Child("KubeProxyIPTablesConfiguration").Child("MinSyncPeriod").String()),
        },
    }

    for _, errorCase := range errorCases {
        if errs := validateKubeProxyIPTablesConfiguration(errorCase.config, newPath.Child("KubeProxyIPTablesConfiguration")); len(errs) == 0 {
            t.Errorf("expected failure for %s", errorCase.msg)
        } else if !strings.Contains(errs[0].Error(), errorCase.msg) {
            t.Errorf("unexpected error: %v, expected: %s", errs[0], errorCase.msg)
        }
    }
}

func TestValidateKubeProxyIPVSConfiguration(t *testing.T) {
    newPath := field.NewPath("KubeProxyConfiguration")
    testCases := []struct {
        config    kubeproxyconfig.KubeProxyIPVSConfiguration
        expectErr bool
        reason    string
    }{
        {
            config: kubeproxyconfig.KubeProxyIPVSConfiguration{
                SyncPeriod:    metav1.Duration{Duration: -5 * time.Second},
                MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
            },
            expectErr: true,
            reason:    "SyncPeriod must be greater than 0",
        },
        {
            config: kubeproxyconfig.KubeProxyIPVSConfiguration{
                SyncPeriod:    metav1.Duration{Duration: 0 * time.Second},
                MinSyncPeriod: metav1.Duration{Duration: 10 * time.Second},
            },
            expectErr: true,
            reason:    "SyncPeriod must be greater than 0",
        },
        {
            config: kubeproxyconfig.KubeProxyIPVSConfiguration{
                SyncPeriod:    metav1.Duration{Duration: 5 * time.Second},
                MinSyncPeriod: metav1.Duration{Duration: -1 * time.Second},
            },
            expectErr: true,
            reason:    "MinSyncPeriod must be greater than or equal to 0",
        },
        {
            config: kubeproxyconfig.KubeProxyIPVSConfiguration{
                SyncPeriod:    metav1.Duration{Duration: 1 * time.Second},
                MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
            },
            expectErr: true,
            reason:    "SyncPeriod must be greater than or equal to MinSyncPeriod",
        },
        // SyncPeriod == MinSyncPeriod
        {
            config: kubeproxyconfig.KubeProxyIPVSConfiguration{
                SyncPeriod:    metav1.Duration{Duration: 10 * time.Second},
                MinSyncPeriod: metav1.Duration{Duration: 10 * time.Second},
            },
            expectErr: false,
        },
        // SyncPeriod > MinSyncPeriod
        {
            config: kubeproxyconfig.KubeProxyIPVSConfiguration{
                SyncPeriod:    metav1.Duration{Duration: 10 * time.Second},
                MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
            },
            expectErr: false,
        },
        // MinSyncPeriod can be 0
        {
            config: kubeproxyconfig.KubeProxyIPVSConfiguration{
                SyncPeriod:    metav1.Duration{Duration: 5 * time.Second},
                MinSyncPeriod: metav1.Duration{Duration: 0 * time.Second},
            },
            expectErr: false,
        },
    }

    for _, test := range testCases {
        errs := validateKubeProxyIPVSConfiguration(test.config, newPath.Child("KubeProxyIPVSConfiguration"))
        if len(errs) == 0 && test.expectErr {
            t.Errorf("Expected error, got nil, reason: %s", test.reason)
        }
        if len(errs) > 0 && !test.expectErr {
            t.Errorf("Unexpected error: %v", errs)
        }
    }
}

func TestValidateKubeProxyConntrackConfiguration(t *testing.T) {
    successCases := []kubeproxyconfig.KubeProxyConntrackConfiguration{
        {
            Max:                   pointer.Int32Ptr(2),
            MaxPerCore:            pointer.Int32Ptr(1),
            Min:                   pointer.Int32Ptr(1),
            TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
            TCPCloseWaitTimeout:   &metav1.Duration{Duration: 5 * time.Second},
        },
        {
            Max:                   pointer.Int32Ptr(0),
            MaxPerCore:            pointer.Int32Ptr(0),
            Min:                   pointer.Int32Ptr(0),
            TCPEstablishedTimeout: &metav1.Duration{Duration: 0 * time.Second},
            TCPCloseWaitTimeout:   &metav1.Duration{Duration: 0 * time.Second},
        },
    }
    newPath := field.NewPath("KubeProxyConfiguration")
    for _, successCase := range successCases {
        if errs := validateKubeProxyConntrackConfiguration(successCase, newPath.Child("KubeProxyConntrackConfiguration")); len(errs) != 0 {
            t.Errorf("expected success: %v", errs)
        }
    }

    errorCases := []struct {
        config kubeproxyconfig.KubeProxyConntrackConfiguration
        msg    string
    }{
        {
            config: kubeproxyconfig.KubeProxyConntrackConfiguration{
                Max:                   pointer.Int32Ptr(-1),
                MaxPerCore:            pointer.Int32Ptr(1),
                Min:                   pointer.Int32Ptr(1),
                TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
                TCPCloseWaitTimeout:   &metav1.Duration{Duration: 5 * time.Second},
            },
            msg: "must be greater than or equal to 0",
        },
        {
            config: kubeproxyconfig.KubeProxyConntrackConfiguration{
                Max:                   pointer.Int32Ptr(2),
                MaxPerCore:            pointer.Int32Ptr(-1),
                Min:                   pointer.Int32Ptr(1),
                TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
                TCPCloseWaitTimeout:   &metav1.Duration{Duration: 5 * time.Second},
            },
            msg: "must be greater than or equal to 0",
        },
        {
            config: kubeproxyconfig.KubeProxyConntrackConfiguration{
                Max:                   pointer.Int32Ptr(2),
                MaxPerCore:            pointer.Int32Ptr(1),
                Min:                   pointer.Int32Ptr(-1),
                TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
                TCPCloseWaitTimeout:   &metav1.Duration{Duration: 5 * time.Second},
            },
            msg: "must be greater than or equal to 0",
        },
        {
            config: kubeproxyconfig.KubeProxyConntrackConfiguration{
                Max:                   pointer.Int32Ptr(4),
                MaxPerCore:            pointer.Int32Ptr(1),
                Min:                   pointer.Int32Ptr(3),
                TCPEstablishedTimeout: &metav1.Duration{Duration: -5 * time.Second},
                TCPCloseWaitTimeout:   &metav1.Duration{Duration: 5 * time.Second},
            },
            msg: "must be greater than or equal to 0",
        },
        {
            config: kubeproxyconfig.KubeProxyConntrackConfiguration{
                Max:                   pointer.Int32Ptr(4),
                MaxPerCore:            pointer.Int32Ptr(1),
                Min:                   pointer.Int32Ptr(3),
                TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
                TCPCloseWaitTimeout:   &metav1.Duration{Duration: -5 * time.Second},
            },
            msg: "must be greater than or equal to 0",
        },
    }

    for _, errorCase := range errorCases {
        if errs := validateKubeProxyConntrackConfiguration(errorCase.config, newPath.Child("KubeProxyConntrackConfiguration")); len(errs) == 0 {
            t.Errorf("expected failure for %s", errorCase.msg)
        } else if !strings.Contains(errs[0].Error(), errorCase.msg) {
            t.Errorf("unexpected error: %v, expected: %s", errs[0], errorCase.msg)
        }
    }
}

func TestValidateProxyMode(t *testing.T) {
    newPath := field.NewPath("KubeProxyConfiguration")
    successCases := []kubeproxyconfig.ProxyMode{
        kubeproxyconfig.ProxyModeUserspace,
        kubeproxyconfig.ProxyMode(""),
    }

    if runtime.GOOS == "windows" {
        successCases = append(successCases, kubeproxyconfig.ProxyModeKernelspace)
    } else {
        successCases = append(successCases, kubeproxyconfig.ProxyModeIPTables, kubeproxyconfig.ProxyModeIPVS)
    }

    for _, successCase := range successCases {
        if errs := validateProxyMode(successCase, newPath.Child("ProxyMode")); len(errs) != 0 {
            t.Errorf("expected success: %v", errs)
        }
    }

    errorCases := []struct {
        mode kubeproxyconfig.ProxyMode
        msg  string
    }{
        {
            mode: kubeproxyconfig.ProxyMode("non-existing"),
            msg:  "or blank (blank means the",
        },
    }

    for _, errorCase := range errorCases {
        if errs := validateProxyMode(errorCase.mode, newPath.Child("ProxyMode")); len(errs) == 0 {
            t.Errorf("expected failure %s for %v", errorCase.msg, errorCase.mode)
        } else if !strings.Contains(errs[0].Error(), errorCase.msg) {
            t.Errorf("unexpected error: %v, expected: %s", errs[0], errorCase.msg)
        }
    }
}

func TestValidateClientConnectionConfiguration(t *testing.T) {
    newPath := field.NewPath("KubeProxyConfiguration")

    successCases := []kubeproxyconfig.ClientConnectionConfiguration{
        {
            Burst: 0,
        },
        {
            Burst: 5,
        },
    }

    for _, successCase := range successCases {
        if errs := validateClientConnectionConfiguration(successCase, newPath.Child("Burst")); len(errs) != 0 {
            t.Errorf("expected success: %v", errs)
        }
    }

    errorCases := []struct {
        ccc kubeproxyconfig.ClientConnectionConfiguration
        msg string
    }{
        {
            ccc: kubeproxyconfig.ClientConnectionConfiguration{Burst: -5},
            msg: "must be greater than or equal to 0",
        },
    }

    for _, errorCase := range errorCases {
        if errs := validateClientConnectionConfiguration(errorCase.ccc, newPath.Child("Burst")); len(errs) == 0 {
            t.Errorf("expected failure for %s", errorCase.msg)
        } else if !strings.Contains(errs[0].Error(), errorCase.msg) {
            t.Errorf("unexpected error: %v, expected: %s", errs[0], errorCase.msg)
        }
    }
}

func TestValidateHostPort(t *testing.T) {
    newPath := field.NewPath("KubeProxyConfiguration")

    successCases := []string{
        "0.0.0.0:10256",
        "127.0.0.1:10256",
        "10.10.10.10:10256",
    }

    for _, successCase := range successCases {
        if errs := validateHostPort(successCase, newPath.Child("HealthzBindAddress")); len(errs) != 0 {
            t.Errorf("expected success: %v", errs)
        }
    }

    errorCases := []struct {
        ccc string
        msg string
    }{
        {
            ccc: "10.10.10.10",
            msg: "must be IP:port",
        },
        {
            ccc: "123.456.789.10:12345",
            msg: "must be a valid IP",
        },
        {
            ccc: "10.10.10.10:foo",
            msg: "must be a valid port",
        },
        {
            ccc: "10.10.10.10:0",
            msg: "must be a valid port",
        },
        {
            ccc: "10.10.10.10:65536",
            msg: "must be a valid port",
        },
    }

    for _, errorCase := range errorCases {
        if errs := validateHostPort(errorCase.ccc, newPath.Child("HealthzBindAddress")); len(errs) == 0 {
            t.Errorf("expected failure for %s", errorCase.msg)
        } else if !strings.Contains(errs[0].Error(), errorCase.msg) {
            t.Errorf("unexpected error: %v, expected: %s", errs[0], errorCase.msg)
        }
    }
}

func TestValidateIPVSSchedulerMethod(t *testing.T) {
    newPath := field.NewPath("KubeProxyConfiguration")

    successCases := []kubeproxyconfig.IPVSSchedulerMethod{
        kubeproxyconfig.RoundRobin,
        kubeproxyconfig.WeightedRoundRobin,
        kubeproxyconfig.LeastConnection,
        kubeproxyconfig.WeightedLeastConnection,
        kubeproxyconfig.LocalityBasedLeastConnection,
        kubeproxyconfig.LocalityBasedLeastConnectionWithReplication,
        kubeproxyconfig.SourceHashing,
        kubeproxyconfig.DestinationHashing,
        kubeproxyconfig.ShortestExpectedDelay,
        kubeproxyconfig.NeverQueue,
        "",
    }

    for _, successCase := range successCases {
        if errs := validateIPVSSchedulerMethod(successCase, newPath.Child("Scheduler")); len(errs) != 0 {
            t.Errorf("expected success: %v", errs)
        }
    }

    errorCases := []struct {
        mode kubeproxyconfig.IPVSSchedulerMethod
        msg  string
    }{
        {
            mode: kubeproxyconfig.IPVSSchedulerMethod("non-existing"),
            msg:  "blank means the default algorithm method (currently rr)",
        },
    }

    for _, errorCase := range errorCases {
        if errs := validateIPVSSchedulerMethod(errorCase.mode, newPath.Child("ProxyMode")); len(errs) == 0 {
            t.Errorf("expected failure for %s", errorCase.msg)
        } else if !strings.Contains(errs[0].Error(), errorCase.msg) {
            t.Errorf("unexpected error: %v, expected: %s", errs[0], errorCase.msg)
        }
    }
}
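A note on usage: the table-driven tests above run with the standard Go toolchain from a Kubernetes checkout (command is illustrative):

    go test k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation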
189
vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/zz_generated.deepcopy.go
generated
vendored
Normal file
@ -0,0 +1,189 @@
// +build !ignore_autogenerated

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file was autogenerated by deepcopy-gen. Do not edit it manually!

package kubeproxyconfig

import (
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClientConnectionConfiguration) DeepCopyInto(out *ClientConnectionConfiguration) {
    *out = *in
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionConfiguration.
func (in *ClientConnectionConfiguration) DeepCopy() *ClientConnectionConfiguration {
    if in == nil {
        return nil
    }
    out := new(ClientConnectionConfiguration)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    out.ClientConnection = in.ClientConnection
    in.IPTables.DeepCopyInto(&out.IPTables)
    out.IPVS = in.IPVS
    if in.OOMScoreAdj != nil {
        in, out := &in.OOMScoreAdj, &out.OOMScoreAdj
        if *in == nil {
            *out = nil
        } else {
            *out = new(int32)
            **out = **in
        }
    }
    out.UDPIdleTimeout = in.UDPIdleTimeout
    in.Conntrack.DeepCopyInto(&out.Conntrack)
    out.ConfigSyncPeriod = in.ConfigSyncPeriod
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfiguration.
func (in *KubeProxyConfiguration) DeepCopy() *KubeProxyConfiguration {
    if in == nil {
        return nil
    }
    out := new(KubeProxyConfiguration)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KubeProxyConfiguration) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    } else {
        return nil
    }
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyConntrackConfiguration) DeepCopyInto(out *KubeProxyConntrackConfiguration) {
    *out = *in
    if in.Max != nil {
        in, out := &in.Max, &out.Max
        if *in == nil {
            *out = nil
        } else {
            *out = new(int32)
            **out = **in
        }
    }
    if in.MaxPerCore != nil {
        in, out := &in.MaxPerCore, &out.MaxPerCore
        if *in == nil {
            *out = nil
        } else {
            *out = new(int32)
            **out = **in
        }
    }
    if in.Min != nil {
        in, out := &in.Min, &out.Min
        if *in == nil {
            *out = nil
        } else {
            *out = new(int32)
            **out = **in
        }
    }
    if in.TCPEstablishedTimeout != nil {
        in, out := &in.TCPEstablishedTimeout, &out.TCPEstablishedTimeout
        if *in == nil {
            *out = nil
        } else {
            *out = new(v1.Duration)
            **out = **in
        }
    }
    if in.TCPCloseWaitTimeout != nil {
        in, out := &in.TCPCloseWaitTimeout, &out.TCPCloseWaitTimeout
        if *in == nil {
            *out = nil
        } else {
            *out = new(v1.Duration)
            **out = **in
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConntrackConfiguration.
func (in *KubeProxyConntrackConfiguration) DeepCopy() *KubeProxyConntrackConfiguration {
    if in == nil {
        return nil
    }
    out := new(KubeProxyConntrackConfiguration)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyIPTablesConfiguration) DeepCopyInto(out *KubeProxyIPTablesConfiguration) {
    *out = *in
    if in.MasqueradeBit != nil {
        in, out := &in.MasqueradeBit, &out.MasqueradeBit
        if *in == nil {
            *out = nil
        } else {
            *out = new(int32)
            **out = **in
        }
    }
    out.SyncPeriod = in.SyncPeriod
    out.MinSyncPeriod = in.MinSyncPeriod
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPTablesConfiguration.
func (in *KubeProxyIPTablesConfiguration) DeepCopy() *KubeProxyIPTablesConfiguration {
    if in == nil {
        return nil
    }
    out := new(KubeProxyIPTablesConfiguration)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyIPVSConfiguration) DeepCopyInto(out *KubeProxyIPVSConfiguration) {
    *out = *in
    out.SyncPeriod = in.SyncPeriod
    out.MinSyncPeriod = in.MinSyncPeriod
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPVSConfiguration.
func (in *KubeProxyIPVSConfiguration) DeepCopy() *KubeProxyIPVSConfiguration {
    if in == nil {
        return nil
    }
    out := new(KubeProxyIPVSConfiguration)
    in.DeepCopyInto(out)
    return out
}
58
vendor/k8s.io/kubernetes/pkg/proxy/config/BUILD
generated
vendored
Normal file
@ -0,0 +1,58 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "config.go",
        "doc.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/proxy/config",
    deps = [
        "//pkg/apis/core:go_default_library",
        "//pkg/client/informers/informers_generated/internalversion/core/internalversion:go_default_library",
        "//pkg/client/listers/core/internalversion:go_default_library",
        "//pkg/controller:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "api_test.go",
        "config_test.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/proxy/config",
    library = ":go_default_library",
    deps = [
        "//pkg/apis/core:go_default_library",
        "//pkg/client/clientset_generated/internalclientset/fake:go_default_library",
        "//pkg/client/informers/informers_generated/internalversion:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/client-go/testing:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
6
vendor/k8s.io/kubernetes/pkg/proxy/config/OWNERS
generated
vendored
Executable file
@ -0,0 +1,6 @@
reviewers:
- thockin
- lavalamp
- smarterclayton
- brendandburns
- freehan
214
vendor/k8s.io/kubernetes/pkg/proxy/config/api_test.go
generated
vendored
Normal file
@ -0,0 +1,214 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
	"reflect"
	"sync"
	"testing"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/watch"
	ktesting "k8s.io/client-go/testing"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
)

func TestNewServicesSourceApi_UpdatesAndMultipleServices(t *testing.T) {
	service1v1 := &api.Service{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "s1"},
		Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}}}
	service1v2 := &api.Service{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "s1"},
		Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 20}}}}
	service2 := &api.Service{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "s2"},
		Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 30}}}}

	// Set up the fake API client.
	client := fake.NewSimpleClientset()
	fakeWatch := watch.NewFake()
	client.PrependWatchReactor("services", ktesting.DefaultWatchReactor(fakeWatch, nil))

	stopCh := make(chan struct{})
	defer close(stopCh)

	handler := NewServiceHandlerMock()

	sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)

	serviceConfig := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), time.Minute)
	serviceConfig.RegisterEventHandler(handler)
	go sharedInformers.Start(stopCh)
	go serviceConfig.Run(stopCh)

	// Add the first service
	fakeWatch.Add(service1v1)
	handler.ValidateServices(t, []*api.Service{service1v1})

	// Add another service
	fakeWatch.Add(service2)
	handler.ValidateServices(t, []*api.Service{service1v1, service2})

	// Modify service1
	fakeWatch.Modify(service1v2)
	handler.ValidateServices(t, []*api.Service{service1v2, service2})

	// Delete service1
	fakeWatch.Delete(service1v2)
	handler.ValidateServices(t, []*api.Service{service2})

	// Delete service2
	fakeWatch.Delete(service2)
	handler.ValidateServices(t, []*api.Service{})
}

func TestNewEndpointsSourceApi_UpdatesAndMultipleEndpoints(t *testing.T) {
	endpoints1v1 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "e1"},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{
				{IP: "1.2.3.4"},
			},
			Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
		}},
	}
	endpoints1v2 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "e1"},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{
				{IP: "1.2.3.4"},
				{IP: "4.3.2.1"},
			},
			Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
		}},
	}
	endpoints2 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "e2"},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{
				{IP: "5.6.7.8"},
			},
			Ports: []api.EndpointPort{{Port: 80, Protocol: "TCP"}},
		}},
	}

	// Set up the fake API client.
	client := fake.NewSimpleClientset()
	fakeWatch := watch.NewFake()
	client.PrependWatchReactor("endpoints", ktesting.DefaultWatchReactor(fakeWatch, nil))

	stopCh := make(chan struct{})
	defer close(stopCh)

	handler := NewEndpointsHandlerMock()

	sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)

	endpointsConfig := NewEndpointsConfig(sharedInformers.Core().InternalVersion().Endpoints(), time.Minute)
	endpointsConfig.RegisterEventHandler(handler)
	go sharedInformers.Start(stopCh)
	go endpointsConfig.Run(stopCh)

	// Add the first endpoints
	fakeWatch.Add(endpoints1v1)
	handler.ValidateEndpoints(t, []*api.Endpoints{endpoints1v1})

	// Add another endpoints
	fakeWatch.Add(endpoints2)
	handler.ValidateEndpoints(t, []*api.Endpoints{endpoints1v1, endpoints2})

	// Modify endpoints1
	fakeWatch.Modify(endpoints1v2)
	handler.ValidateEndpoints(t, []*api.Endpoints{endpoints1v2, endpoints2})

	// Delete endpoints1
	fakeWatch.Delete(endpoints1v2)
	handler.ValidateEndpoints(t, []*api.Endpoints{endpoints2})

	// Delete endpoints2
	fakeWatch.Delete(endpoints2)
	handler.ValidateEndpoints(t, []*api.Endpoints{})
}

func newSvcHandler(t *testing.T, svcs []*api.Service, done func()) ServiceHandler {
	shm := &ServiceHandlerMock{
		state: make(map[types.NamespacedName]*api.Service),
	}
	shm.process = func(services []*api.Service) {
		defer done()
		if !reflect.DeepEqual(services, svcs) {
			t.Errorf("Unexpected services: %#v, expected: %#v", services, svcs)
		}
	}
	return shm
}

func newEpsHandler(t *testing.T, eps []*api.Endpoints, done func()) EndpointsHandler {
	ehm := &EndpointsHandlerMock{
		state: make(map[types.NamespacedName]*api.Endpoints),
	}
	ehm.process = func(endpoints []*api.Endpoints) {
		defer done()
		if !reflect.DeepEqual(eps, endpoints) {
			t.Errorf("Unexpected endpoints: %#v, expected: %#v", endpoints, eps)
		}
	}
	return ehm
}

func TestInitialSync(t *testing.T) {
	svc1 := &api.Service{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
		Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
	}
	svc2 := &api.Service{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
		Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
	}
	eps1 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
	}
	eps2 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
	}

	var wg sync.WaitGroup
	// Wait for both the services handler and the endpoints handler.
	wg.Add(2)

	// Set up the fake API client.
	client := fake.NewSimpleClientset(svc1, svc2, eps2, eps1)
	sharedInformers := informers.NewSharedInformerFactory(client, 0)

	svcConfig := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), 0)
	epsConfig := NewEndpointsConfig(sharedInformers.Core().InternalVersion().Endpoints(), 0)
	svcHandler := newSvcHandler(t, []*api.Service{svc2, svc1}, wg.Done)
	svcConfig.RegisterEventHandler(svcHandler)
	epsHandler := newEpsHandler(t, []*api.Endpoints{eps2, eps1}, wg.Done)
	epsConfig.RegisterEventHandler(epsHandler)

	stopCh := make(chan struct{})
	defer close(stopCh)
	go sharedInformers.Start(stopCh)
	go svcConfig.Run(stopCh)
	go epsConfig.Run(stopCh)
	wg.Wait()
}
263
vendor/k8s.io/kubernetes/pkg/proxy/config/config.go
generated
vendored
Normal file
@ -0,0 +1,263 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
	"fmt"
	"time"

	"github.com/golang/glog"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/tools/cache"
	api "k8s.io/kubernetes/pkg/apis/core"
	coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion"
	listers "k8s.io/kubernetes/pkg/client/listers/core/internalversion"
	"k8s.io/kubernetes/pkg/controller"
)

// ServiceHandler is an abstract interface of objects which receive
// notifications about service object changes.
type ServiceHandler interface {
	// OnServiceAdd is called whenever creation of a new service object
	// is observed.
	OnServiceAdd(service *api.Service)
	// OnServiceUpdate is called whenever modification of an existing
	// service object is observed.
	OnServiceUpdate(oldService, service *api.Service)
	// OnServiceDelete is called whenever deletion of an existing service
	// object is observed.
	OnServiceDelete(service *api.Service)
	// OnServiceSynced is called once all the initial event handlers were
	// called and the state is fully propagated to the local cache.
	OnServiceSynced()
}
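
// loggingServiceHandler is an illustrative sketch added by the editor, not
// part of the upstream file: a minimal ServiceHandler that just logs each
// notification. The mock handlers in config_test.go follow the same shape.
type loggingServiceHandler struct{}

func (loggingServiceHandler) OnServiceAdd(service *api.Service) {
	glog.Infof("service added: %s/%s", service.Namespace, service.Name)
}
func (loggingServiceHandler) OnServiceUpdate(oldService, service *api.Service) {
	glog.Infof("service updated: %s/%s", service.Namespace, service.Name)
}
func (loggingServiceHandler) OnServiceDelete(service *api.Service) {
	glog.Infof("service deleted: %s/%s", service.Namespace, service.Name)
}
func (loggingServiceHandler) OnServiceSynced() {
	glog.Info("initial service state synced")
}

var _ ServiceHandler = loggingServiceHandler{}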

// EndpointsHandler is an abstract interface of objects which receive
// notifications about endpoints object changes.
type EndpointsHandler interface {
	// OnEndpointsAdd is called whenever creation of a new endpoints object
	// is observed.
	OnEndpointsAdd(endpoints *api.Endpoints)
	// OnEndpointsUpdate is called whenever modification of an existing
	// endpoints object is observed.
	OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints)
	// OnEndpointsDelete is called whenever deletion of an existing endpoints
	// object is observed.
	OnEndpointsDelete(endpoints *api.Endpoints)
	// OnEndpointsSynced is called once all the initial event handlers were
	// called and the state is fully propagated to the local cache.
	OnEndpointsSynced()
}

// EndpointsConfig tracks a set of endpoints configurations.
// It accepts "set", "add" and "remove" operations of endpoints via channels, and invokes registered handlers on change.
type EndpointsConfig struct {
	lister        listers.EndpointsLister
	listerSynced  cache.InformerSynced
	eventHandlers []EndpointsHandler
}

// NewEndpointsConfig creates a new EndpointsConfig.
func NewEndpointsConfig(endpointsInformer coreinformers.EndpointsInformer, resyncPeriod time.Duration) *EndpointsConfig {
	result := &EndpointsConfig{
		lister:       endpointsInformer.Lister(),
		listerSynced: endpointsInformer.Informer().HasSynced,
	}

	endpointsInformer.Informer().AddEventHandlerWithResyncPeriod(
		cache.ResourceEventHandlerFuncs{
			AddFunc:    result.handleAddEndpoints,
			UpdateFunc: result.handleUpdateEndpoints,
			DeleteFunc: result.handleDeleteEndpoints,
		},
		resyncPeriod,
	)

	return result
}
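
// Illustrative sketch, not part of the upstream file: typical wiring of an
// EndpointsConfig against a shared informer factory, mirroring the tests in
// api_test.go. The names client, resyncPeriod, handler, and stopCh are
// hypothetical; informers refers to the generated internalversion informer
// factory used by those tests.
//
//	sharedInformers := informers.NewSharedInformerFactory(client, resyncPeriod)
//	endpointsConfig := NewEndpointsConfig(sharedInformers.Core().InternalVersion().Endpoints(), resyncPeriod)
//	endpointsConfig.RegisterEventHandler(handler)
//	go sharedInformers.Start(stopCh)
//	go endpointsConfig.Run(stopCh)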

// RegisterEventHandler registers a handler which is called on every endpoints change.
func (c *EndpointsConfig) RegisterEventHandler(handler EndpointsHandler) {
	c.eventHandlers = append(c.eventHandlers, handler)
}

// Run starts the goroutine responsible for calling registered handlers.
func (c *EndpointsConfig) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	glog.Info("Starting endpoints config controller")
	defer glog.Info("Shutting down endpoints config controller")

	if !controller.WaitForCacheSync("endpoints config", stopCh, c.listerSynced) {
		return
	}

	for i := range c.eventHandlers {
		glog.V(3).Infof("Calling handler.OnEndpointsSynced()")
		c.eventHandlers[i].OnEndpointsSynced()
	}

	<-stopCh
}

func (c *EndpointsConfig) handleAddEndpoints(obj interface{}) {
	endpoints, ok := obj.(*api.Endpoints)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
		return
	}
	for i := range c.eventHandlers {
		glog.V(4).Infof("Calling handler.OnEndpointsAdd")
		c.eventHandlers[i].OnEndpointsAdd(endpoints)
	}
}

func (c *EndpointsConfig) handleUpdateEndpoints(oldObj, newObj interface{}) {
	oldEndpoints, ok := oldObj.(*api.Endpoints)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", oldObj))
		return
	}
	endpoints, ok := newObj.(*api.Endpoints)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", newObj))
		return
	}
	for i := range c.eventHandlers {
		glog.V(4).Infof("Calling handler.OnEndpointsUpdate")
		c.eventHandlers[i].OnEndpointsUpdate(oldEndpoints, endpoints)
	}
}

func (c *EndpointsConfig) handleDeleteEndpoints(obj interface{}) {
	endpoints, ok := obj.(*api.Endpoints)
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
			return
		}
		if endpoints, ok = tombstone.Obj.(*api.Endpoints); !ok {
			utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
			return
		}
	}
	for i := range c.eventHandlers {
		glog.V(4).Infof("Calling handler.OnEndpointsDelete")
		c.eventHandlers[i].OnEndpointsDelete(endpoints)
	}
}

// ServiceConfig tracks a set of service configurations.
// It accepts "set", "add" and "remove" operations of services via channels, and invokes registered handlers on change.
type ServiceConfig struct {
	lister        listers.ServiceLister
	listerSynced  cache.InformerSynced
	eventHandlers []ServiceHandler
}

// NewServiceConfig creates a new ServiceConfig.
func NewServiceConfig(serviceInformer coreinformers.ServiceInformer, resyncPeriod time.Duration) *ServiceConfig {
	result := &ServiceConfig{
		lister:       serviceInformer.Lister(),
		listerSynced: serviceInformer.Informer().HasSynced,
	}

	serviceInformer.Informer().AddEventHandlerWithResyncPeriod(
		cache.ResourceEventHandlerFuncs{
			AddFunc:    result.handleAddService,
			UpdateFunc: result.handleUpdateService,
			DeleteFunc: result.handleDeleteService,
		},
		resyncPeriod,
	)

	return result
}

// RegisterEventHandler registers a handler which is called on every service change.
func (c *ServiceConfig) RegisterEventHandler(handler ServiceHandler) {
	c.eventHandlers = append(c.eventHandlers, handler)
}

// Run starts the goroutine responsible for calling
// registered handlers.
func (c *ServiceConfig) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	glog.Info("Starting service config controller")
	defer glog.Info("Shutting down service config controller")

	if !controller.WaitForCacheSync("service config", stopCh, c.listerSynced) {
		return
	}

	for i := range c.eventHandlers {
		glog.V(3).Infof("Calling handler.OnServiceSynced()")
		c.eventHandlers[i].OnServiceSynced()
	}

	<-stopCh
}

func (c *ServiceConfig) handleAddService(obj interface{}) {
	service, ok := obj.(*api.Service)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
		return
	}
	for i := range c.eventHandlers {
		glog.V(4).Infof("Calling handler.OnServiceAdd")
		c.eventHandlers[i].OnServiceAdd(service)
	}
}

func (c *ServiceConfig) handleUpdateService(oldObj, newObj interface{}) {
	oldService, ok := oldObj.(*api.Service)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", oldObj))
		return
	}
	service, ok := newObj.(*api.Service)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", newObj))
		return
	}
	for i := range c.eventHandlers {
		glog.V(4).Infof("Calling handler.OnServiceUpdate")
		c.eventHandlers[i].OnServiceUpdate(oldService, service)
	}
}

func (c *ServiceConfig) handleDeleteService(obj interface{}) {
	service, ok := obj.(*api.Service)
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
			return
		}
		if service, ok = tombstone.Obj.(*api.Service); !ok {
			utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
			return
		}
	}
	for i := range c.eventHandlers {
		glog.V(4).Infof("Calling handler.OnServiceDelete")
		c.eventHandlers[i].OnServiceDelete(service)
	}
}
435
vendor/k8s.io/kubernetes/pkg/proxy/config/config_test.go
generated
vendored
Normal file
@ -0,0 +1,435 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
	"reflect"
	"sort"
	"sync"
	"testing"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	ktesting "k8s.io/client-go/testing"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
)

type sortedServices []*api.Service

func (s sortedServices) Len() int {
	return len(s)
}
func (s sortedServices) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
func (s sortedServices) Less(i, j int) bool {
	return s[i].Name < s[j].Name
}

type ServiceHandlerMock struct {
	lock sync.Mutex

	state   map[types.NamespacedName]*api.Service
	synced  bool
	updated chan []*api.Service
	process func([]*api.Service)
}

func NewServiceHandlerMock() *ServiceHandlerMock {
	shm := &ServiceHandlerMock{
		state:   make(map[types.NamespacedName]*api.Service),
		updated: make(chan []*api.Service, 5),
	}
	shm.process = func(services []*api.Service) {
		shm.updated <- services
	}
	return shm
}

func (h *ServiceHandlerMock) OnServiceAdd(service *api.Service) {
	h.lock.Lock()
	defer h.lock.Unlock()
	namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
	h.state[namespacedName] = service
	h.sendServices()
}

func (h *ServiceHandlerMock) OnServiceUpdate(oldService, service *api.Service) {
	h.lock.Lock()
	defer h.lock.Unlock()
	namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
	h.state[namespacedName] = service
	h.sendServices()
}

func (h *ServiceHandlerMock) OnServiceDelete(service *api.Service) {
	h.lock.Lock()
	defer h.lock.Unlock()
	namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
	delete(h.state, namespacedName)
	h.sendServices()
}

func (h *ServiceHandlerMock) OnServiceSynced() {
	h.lock.Lock()
	defer h.lock.Unlock()
	h.synced = true
	h.sendServices()
}

func (h *ServiceHandlerMock) sendServices() {
	if !h.synced {
		return
	}
	services := make([]*api.Service, 0, len(h.state))
	for _, svc := range h.state {
		services = append(services, svc)
	}
	sort.Sort(sortedServices(services))
	h.process(services)
}

func (h *ServiceHandlerMock) ValidateServices(t *testing.T, expectedServices []*api.Service) {
	// We might get 1 or more updates for N service updates, because we
	// overwrite older snapshots of services from the producer goroutine
	// if the consumer falls behind.
	var services []*api.Service
	for {
		select {
		case services = <-h.updated:
			if reflect.DeepEqual(services, expectedServices) {
				return
			}
		// Unit tests will hard timeout in 5m with a stack trace; prevent that
		// and surface a clearer reason for failure.
		case <-time.After(wait.ForeverTestTimeout):
			t.Errorf("Timed out. Expected %#v, Got %#v", expectedServices, services)
			return
		}
	}
}

type sortedEndpoints []*api.Endpoints

func (s sortedEndpoints) Len() int {
	return len(s)
}
func (s sortedEndpoints) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
func (s sortedEndpoints) Less(i, j int) bool {
	return s[i].Name < s[j].Name
}

type EndpointsHandlerMock struct {
	lock sync.Mutex

	state   map[types.NamespacedName]*api.Endpoints
	synced  bool
	updated chan []*api.Endpoints
	process func([]*api.Endpoints)
}

func NewEndpointsHandlerMock() *EndpointsHandlerMock {
	ehm := &EndpointsHandlerMock{
		state:   make(map[types.NamespacedName]*api.Endpoints),
		updated: make(chan []*api.Endpoints, 5),
	}
	ehm.process = func(endpoints []*api.Endpoints) {
		ehm.updated <- endpoints
	}
	return ehm
}

func (h *EndpointsHandlerMock) OnEndpointsAdd(endpoints *api.Endpoints) {
	h.lock.Lock()
	defer h.lock.Unlock()
	namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
	h.state[namespacedName] = endpoints
	h.sendEndpoints()
}

func (h *EndpointsHandlerMock) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
	h.lock.Lock()
	defer h.lock.Unlock()
	namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
	h.state[namespacedName] = endpoints
	h.sendEndpoints()
}

func (h *EndpointsHandlerMock) OnEndpointsDelete(endpoints *api.Endpoints) {
	h.lock.Lock()
	defer h.lock.Unlock()
	namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
	delete(h.state, namespacedName)
	h.sendEndpoints()
}

func (h *EndpointsHandlerMock) OnEndpointsSynced() {
	h.lock.Lock()
	defer h.lock.Unlock()
	h.synced = true
	h.sendEndpoints()
}

func (h *EndpointsHandlerMock) sendEndpoints() {
	if !h.synced {
		return
	}
	endpoints := make([]*api.Endpoints, 0, len(h.state))
	for _, eps := range h.state {
		endpoints = append(endpoints, eps)
	}
	sort.Sort(sortedEndpoints(endpoints))
	h.process(endpoints)
}

func (h *EndpointsHandlerMock) ValidateEndpoints(t *testing.T, expectedEndpoints []*api.Endpoints) {
	// We might get 1 or more updates for N endpoint updates, because we
	// overwrite older snapshots of endpoints from the producer goroutine
	// if the consumer falls behind. Unit tests will hard timeout in 5m.
	var endpoints []*api.Endpoints
	for {
		select {
		case endpoints = <-h.updated:
			if reflect.DeepEqual(endpoints, expectedEndpoints) {
				return
			}
		// Unit tests will hard timeout in 5m with a stack trace; prevent that
		// and surface a clearer reason for failure.
		case <-time.After(wait.ForeverTestTimeout):
			t.Errorf("Timed out. Expected %#v, Got %#v", expectedEndpoints, endpoints)
			return
		}
	}
}

func TestNewServiceAddedAndNotified(t *testing.T) {
	client := fake.NewSimpleClientset()
	fakeWatch := watch.NewFake()
	client.PrependWatchReactor("services", ktesting.DefaultWatchReactor(fakeWatch, nil))

	stopCh := make(chan struct{})
	defer close(stopCh)

	sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)

	config := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), time.Minute)
	handler := NewServiceHandlerMock()
	config.RegisterEventHandler(handler)
	go sharedInformers.Start(stopCh)
	go config.Run(stopCh)

	service := &api.Service{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
		Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
	}
	fakeWatch.Add(service)
	handler.ValidateServices(t, []*api.Service{service})
}

func TestServiceAddedRemovedSetAndNotified(t *testing.T) {
	client := fake.NewSimpleClientset()
	fakeWatch := watch.NewFake()
	client.PrependWatchReactor("services", ktesting.DefaultWatchReactor(fakeWatch, nil))

	stopCh := make(chan struct{})
	defer close(stopCh)

	sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)

	config := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), time.Minute)
	handler := NewServiceHandlerMock()
	config.RegisterEventHandler(handler)
	go sharedInformers.Start(stopCh)
	go config.Run(stopCh)

	service1 := &api.Service{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
		Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
	}
	fakeWatch.Add(service1)
	handler.ValidateServices(t, []*api.Service{service1})

	service2 := &api.Service{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
		Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 20}}},
	}
	fakeWatch.Add(service2)
	services := []*api.Service{service2, service1}
	handler.ValidateServices(t, services)

	fakeWatch.Delete(service1)
	services = []*api.Service{service2}
	handler.ValidateServices(t, services)
}

func TestNewServicesMultipleHandlersAddedAndNotified(t *testing.T) {
	client := fake.NewSimpleClientset()
	fakeWatch := watch.NewFake()
	client.PrependWatchReactor("services", ktesting.DefaultWatchReactor(fakeWatch, nil))

	stopCh := make(chan struct{})
	defer close(stopCh)

	sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)

	config := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), time.Minute)
	handler := NewServiceHandlerMock()
	handler2 := NewServiceHandlerMock()
	config.RegisterEventHandler(handler)
	config.RegisterEventHandler(handler2)
	go sharedInformers.Start(stopCh)
	go config.Run(stopCh)

	service1 := &api.Service{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
		Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
	}
	service2 := &api.Service{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
		Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 20}}},
	}
	fakeWatch.Add(service1)
	fakeWatch.Add(service2)

	services := []*api.Service{service2, service1}
	handler.ValidateServices(t, services)
	handler2.ValidateServices(t, services)
}

func TestNewEndpointsMultipleHandlersAddedAndNotified(t *testing.T) {
	client := fake.NewSimpleClientset()
	fakeWatch := watch.NewFake()
	client.PrependWatchReactor("endpoints", ktesting.DefaultWatchReactor(fakeWatch, nil))

	stopCh := make(chan struct{})
	defer close(stopCh)

	sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)

	config := NewEndpointsConfig(sharedInformers.Core().InternalVersion().Endpoints(), time.Minute)
	handler := NewEndpointsHandlerMock()
	handler2 := NewEndpointsHandlerMock()
	config.RegisterEventHandler(handler)
	config.RegisterEventHandler(handler2)
	go sharedInformers.Start(stopCh)
	go config.Run(stopCh)

	endpoints1 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "1.1.1.1"}, {IP: "2.2.2.2"}},
			Ports:     []api.EndpointPort{{Port: 80}},
		}},
	}
	endpoints2 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "3.3.3.3"}, {IP: "4.4.4.4"}},
			Ports:     []api.EndpointPort{{Port: 80}},
		}},
	}
	fakeWatch.Add(endpoints1)
	fakeWatch.Add(endpoints2)

	endpoints := []*api.Endpoints{endpoints2, endpoints1}
	handler.ValidateEndpoints(t, endpoints)
	handler2.ValidateEndpoints(t, endpoints)
}

func TestNewEndpointsMultipleHandlersAddRemoveSetAndNotified(t *testing.T) {
	client := fake.NewSimpleClientset()
	fakeWatch := watch.NewFake()
	client.PrependWatchReactor("endpoints", ktesting.DefaultWatchReactor(fakeWatch, nil))

	stopCh := make(chan struct{})
	defer close(stopCh)

	sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)

	config := NewEndpointsConfig(sharedInformers.Core().InternalVersion().Endpoints(), time.Minute)
	handler := NewEndpointsHandlerMock()
	handler2 := NewEndpointsHandlerMock()
	config.RegisterEventHandler(handler)
	config.RegisterEventHandler(handler2)
	go sharedInformers.Start(stopCh)
	go config.Run(stopCh)

	endpoints1 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "1.1.1.1"}, {IP: "2.2.2.2"}},
			Ports:     []api.EndpointPort{{Port: 80}},
		}},
	}
	endpoints2 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "3.3.3.3"}, {IP: "4.4.4.4"}},
			Ports:     []api.EndpointPort{{Port: 80}},
		}},
	}
	fakeWatch.Add(endpoints1)
	fakeWatch.Add(endpoints2)

	endpoints := []*api.Endpoints{endpoints2, endpoints1}
	handler.ValidateEndpoints(t, endpoints)
	handler2.ValidateEndpoints(t, endpoints)

	// Add one more
	endpoints3 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foobar"},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "5.5.5.5"}, {IP: "6.6.6.6"}},
			Ports:     []api.EndpointPort{{Port: 80}},
		}},
	}
	fakeWatch.Add(endpoints3)
	endpoints = []*api.Endpoints{endpoints2, endpoints1, endpoints3}
	handler.ValidateEndpoints(t, endpoints)
	handler2.ValidateEndpoints(t, endpoints)

	// Update the "foo" service with new endpoints
	endpoints1v2 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "7.7.7.7"}},
			Ports:     []api.EndpointPort{{Port: 80}},
		}},
	}
	fakeWatch.Modify(endpoints1v2)
	endpoints = []*api.Endpoints{endpoints2, endpoints1v2, endpoints3}
	handler.ValidateEndpoints(t, endpoints)
	handler2.ValidateEndpoints(t, endpoints)

	// Remove "bar" endpoints
	fakeWatch.Delete(endpoints2)
	endpoints = []*api.Endpoints{endpoints1v2, endpoints3}
	handler.ValidateEndpoints(t, endpoints)
	handler2.ValidateEndpoints(t, endpoints)
}

// TODO: Add a unit test for interrupts getting processed in a timely manner.
// Currently this module has a circular dependency with config, and so it's
// named config_test, which means even test methods need to be public. This
// is a refactoring that we can avoid by resolving the dependency.
25
vendor/k8s.io/kubernetes/pkg/proxy/config/doc.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package config provides decoupling between various configuration sources (etcd, files, ...) and
// the pieces that actually care about them (loadbalancer, proxy). Config takes one or more
// configuration sources and allows for incremental (add/remove) and full replace (set)
// changes from each of the sources, then creates a union of the configuration and provides
// a unified view for both service handlers and endpoint handlers. There is no attempt
// to resolve conflicts of any sort. The basic idea is that each configuration source gets a channel
// from the Config service and pushes updates to it via that channel. Config then keeps track of
// incremental & replace changes and distributes them to listeners as appropriate.
package config // import "k8s.io/kubernetes/pkg/proxy/config"
18
vendor/k8s.io/kubernetes/pkg/proxy/doc.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package proxy implements the layer-3 network proxy.
package proxy // import "k8s.io/kubernetes/pkg/proxy"
52
vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD
generated
vendored
Normal file
@ -0,0 +1,52 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "healthcheck.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/proxy/healthcheck",
    deps = [
        "//pkg/apis/core:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/renstrom/dedent:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["healthcheck_test.go"],
    importpath = "k8s.io/kubernetes/pkg/proxy/healthcheck",
    library = ":go_default_library",
    deps = [
        "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
2
vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/OWNERS
generated
vendored
Executable file
@ -0,0 +1,2 @@
reviewers:
- m1093782566
18
vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/doc.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package healthcheck provides tools for serving kube-proxy healthchecks.
package healthcheck // import "k8s.io/kubernetes/pkg/proxy/healthcheck"
347
vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go
generated
vendored
Normal file
@ -0,0 +1,347 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package healthcheck

import (
	"fmt"
	"net"
	"net/http"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/golang/glog"
	"github.com/renstrom/dedent"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/tools/record"
	api "k8s.io/kubernetes/pkg/apis/core"
)

var nodeHealthzRetryInterval = 60 * time.Second

// Server serves HTTP endpoints for each service name, with results
// based on the endpoints. If there are 0 endpoints for a service, it returns a
// 503 "Service Unavailable" error (telling LBs not to use this node). If there
// are 1 or more endpoints, it returns a 200 "OK".
type Server interface {
	// Make the new set of services be active. Services that were open before
	// will be closed. Services that are new will be opened. Services that
	// existed and are in the new set will be left alone. The value of the map
	// is the healthcheck-port to listen on.
	SyncServices(newServices map[types.NamespacedName]uint16) error
	// Make the new set of endpoints be active. Endpoints for services that do
	// not exist will be dropped. The value of the map is the number of
	// endpoints the service has on this node.
	SyncEndpoints(newEndpoints map[types.NamespacedName]int) error
}
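
// Illustrative sketch, not part of the upstream file: how a caller might
// drive a Server across a sync loop, mirroring the flow in
// healthcheck_test.go. The hostname, recorder, nsn, and port values are
// hypothetical.
//
//	hcServer := NewServer(hostname, recorder, nil, nil) // nil => real listener and http.Server
//	hcServer.SyncServices(map[types.NamespacedName]uint16{nsn: 9376}) // opens :9376
//	hcServer.SyncEndpoints(map[types.NamespacedName]int{nsn: 2})      // 2 local endpoints => 200 OK
//	hcServer.SyncEndpoints(map[types.NamespacedName]int{nsn: 0})      // 0 local endpoints => 503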

// Listener allows for testing of Server. If the Listener argument
// to NewServer() is nil, the real net.Listen function will be used.
type Listener interface {
	// Listen is very much like net.Listen, except the first arg (network) is
	// fixed to be "tcp".
	Listen(addr string) (net.Listener, error)
}

// HTTPServerFactory allows for testing of Server. If the
// HTTPServerFactory argument to NewServer() is nil, the real
// http.Server type will be used.
type HTTPServerFactory interface {
	// New creates an instance of a type satisfying HTTPServer. This is
	// designed to include http.Server.
	New(addr string, handler http.Handler) HTTPServer
}

// HTTPServer allows for testing of Server.
type HTTPServer interface {
	// Serve is designed so that http.Server satisfies this interface.
	Serve(listener net.Listener) error
}

// NewServer allocates a new healthcheck server manager. If either
// of the injected arguments is nil, a default will be used.
func NewServer(hostname string, recorder record.EventRecorder, listener Listener, httpServerFactory HTTPServerFactory) Server {
	if listener == nil {
		listener = stdNetListener{}
	}
	if httpServerFactory == nil {
		httpServerFactory = stdHTTPServerFactory{}
	}
	return &server{
		hostname:    hostname,
		recorder:    recorder,
		listener:    listener,
		httpFactory: httpServerFactory,
		services:    map[types.NamespacedName]*hcInstance{},
	}
}

// Implement Listener in terms of net.Listen.
type stdNetListener struct{}

func (stdNetListener) Listen(addr string) (net.Listener, error) {
	return net.Listen("tcp", addr)
}

var _ Listener = stdNetListener{}

// Implement HTTPServerFactory in terms of http.Server.
type stdHTTPServerFactory struct{}

func (stdHTTPServerFactory) New(addr string, handler http.Handler) HTTPServer {
	return &http.Server{
		Addr:    addr,
		Handler: handler,
	}
}

var _ HTTPServerFactory = stdHTTPServerFactory{}

type server struct {
	hostname    string
	recorder    record.EventRecorder // can be nil
	listener    Listener
	httpFactory HTTPServerFactory

	lock     sync.Mutex
	services map[types.NamespacedName]*hcInstance
}

func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) error {
	hcs.lock.Lock()
	defer hcs.lock.Unlock()

	// Remove any that are not needed any more.
	for nsn, svc := range hcs.services {
		if port, found := newServices[nsn]; !found || port != svc.port {
			glog.V(2).Infof("Closing healthcheck %q on port %d", nsn.String(), svc.port)
			if err := svc.listener.Close(); err != nil {
				glog.Errorf("Close(%v): %v", svc.listener.Addr(), err)
			}
			delete(hcs.services, nsn)
		}
	}

	// Add any that are needed.
	for nsn, port := range newServices {
		if hcs.services[nsn] != nil {
			glog.V(3).Infof("Existing healthcheck %q on port %d", nsn.String(), port)
			continue
		}

		glog.V(2).Infof("Opening healthcheck %q on port %d", nsn.String(), port)
		svc := &hcInstance{port: port}
		addr := fmt.Sprintf(":%d", port)
		svc.server = hcs.httpFactory.New(addr, hcHandler{name: nsn, hcs: hcs})
		var err error
		svc.listener, err = hcs.listener.Listen(addr)
		if err != nil {
			msg := fmt.Sprintf("node %s failed to start healthcheck %q on port %d: %v", hcs.hostname, nsn.String(), port, err)

			if hcs.recorder != nil {
				hcs.recorder.Eventf(
					&v1.ObjectReference{
						Kind:      "Service",
						Namespace: nsn.Namespace,
						Name:      nsn.Name,
						UID:       types.UID(nsn.String()),
					}, api.EventTypeWarning, "FailedToStartServiceHealthcheck", msg)
			}
			glog.Error(msg)
			continue
		}
		hcs.services[nsn] = svc

		go func(nsn types.NamespacedName, svc *hcInstance) {
			// Serve() will exit when the listener is closed.
			glog.V(3).Infof("Starting goroutine for healthcheck %q on port %d", nsn.String(), svc.port)
			if err := svc.server.Serve(svc.listener); err != nil {
				glog.V(3).Infof("Healthcheck %q closed: %v", nsn.String(), err)
				return
			}
			glog.V(3).Infof("Healthcheck %q closed", nsn.String())
		}(nsn, svc)
	}
	return nil
}

type hcInstance struct {
	port      uint16
	listener  net.Listener
	server    HTTPServer
	endpoints int // number of local endpoints for a service
}

type hcHandler struct {
	name types.NamespacedName
	hcs  *server
}

var _ http.Handler = hcHandler{}

func (h hcHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
	h.hcs.lock.Lock()
	svc, ok := h.hcs.services[h.name]
	if !ok || svc == nil {
		h.hcs.lock.Unlock()
		glog.Errorf("Received request for closed healthcheck %q", h.name.String())
		return
	}
	count := svc.endpoints
	h.hcs.lock.Unlock()

	resp.Header().Set("Content-Type", "application/json")
	if count == 0 {
		resp.WriteHeader(http.StatusServiceUnavailable)
	} else {
		resp.WriteHeader(http.StatusOK)
	}
	fmt.Fprint(resp, strings.Trim(dedent.Dedent(fmt.Sprintf(`
		{
			"service": {
				"namespace": %q,
				"name": %q
			},
			"localEndpoints": %d
		}
		`, h.name.Namespace, h.name.Name, count)), "\n"))
}
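
// For reference (derived from the format string above), probing the
// healthcheck for a service "a/b" with 2 local endpoints returns HTTP 200
// with a body like:
//
//	{
//		"service": {
//			"namespace": "a",
//			"name": "b"
//		},
//		"localEndpoints": 2
//	}
//
// With 0 local endpoints the same body is served with status 503.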

func (hcs *server) SyncEndpoints(newEndpoints map[types.NamespacedName]int) error {
	hcs.lock.Lock()
	defer hcs.lock.Unlock()

	for nsn, count := range newEndpoints {
		if hcs.services[nsn] == nil {
			glog.V(3).Infof("Not saving endpoints for unknown healthcheck %q", nsn.String())
			continue
		}
		glog.V(3).Infof("Reporting %d endpoints for healthcheck %q", count, nsn.String())
		hcs.services[nsn].endpoints = count
	}
	for nsn, hci := range hcs.services {
		if _, found := newEndpoints[nsn]; !found {
			hci.endpoints = 0
		}
	}
	return nil
}

// HealthzUpdater allows callers to update the healthz timestamp only.
type HealthzUpdater interface {
	UpdateTimestamp()
}

// HealthzServer returns 200 "OK" by default. Once the timestamp has been
// updated, it keeps returning 200 "OK" only as long as the time since the
// last update does not exceed the configured health timeout.
type HealthzServer struct {
	listener    Listener
	httpFactory HTTPServerFactory
	clock       clock.Clock

	addr          string
	port          int32
	healthTimeout time.Duration
	recorder      record.EventRecorder
	nodeRef       *v1.ObjectReference

	lastUpdated atomic.Value
}

// NewDefaultHealthzServer returns a default healthz http server.
func NewDefaultHealthzServer(addr string, healthTimeout time.Duration, recorder record.EventRecorder, nodeRef *v1.ObjectReference) *HealthzServer {
	return newHealthzServer(nil, nil, nil, addr, healthTimeout, recorder, nodeRef)
}

func newHealthzServer(listener Listener, httpServerFactory HTTPServerFactory, c clock.Clock, addr string, healthTimeout time.Duration, recorder record.EventRecorder, nodeRef *v1.ObjectReference) *HealthzServer {
	if listener == nil {
		listener = stdNetListener{}
	}
	if httpServerFactory == nil {
		httpServerFactory = stdHTTPServerFactory{}
	}
	if c == nil {
		c = clock.RealClock{}
	}
	return &HealthzServer{
		listener:      listener,
		httpFactory:   httpServerFactory,
		clock:         c,
		addr:          addr,
		healthTimeout: healthTimeout,
		recorder:      recorder,
		nodeRef:       nodeRef,
	}
}

// UpdateTimestamp updates the lastUpdated timestamp.
func (hs *HealthzServer) UpdateTimestamp() {
	hs.lastUpdated.Store(hs.clock.Now())
}
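
// Illustrative sketch, not part of the upstream file: keeping the healthz
// endpoint fresh from a sync loop. The addr, timeout, recorder, and nodeRef
// values are hypothetical; the timeout should exceed the expected interval
// between UpdateTimestamp calls.
//
//	healthz := NewDefaultHealthzServer(":10256", 2*time.Minute, recorder, nodeRef)
//	healthz.Run()
//	// ...then, after every successful sync iteration:
//	healthz.UpdateTimestamp()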

// Run starts the healthz http server and returns.
func (hs *HealthzServer) Run() {
	serveMux := http.NewServeMux()
	serveMux.Handle("/healthz", healthzHandler{hs: hs})
	server := hs.httpFactory.New(hs.addr, serveMux)

	go wait.Until(func() {
		glog.V(3).Infof("Starting goroutine for healthz on %s", hs.addr)

		listener, err := hs.listener.Listen(hs.addr)
		if err != nil {
			msg := fmt.Sprintf("Failed to start node healthz on %s: %v", hs.addr, err)
			if hs.recorder != nil {
				hs.recorder.Eventf(hs.nodeRef, api.EventTypeWarning, "FailedToStartNodeHealthcheck", msg)
			}
			glog.Error(msg)
			return
		}

		if err := server.Serve(listener); err != nil {
			glog.Errorf("Healthz closed with error: %v", err)
			return
		}
		glog.Error("Unexpected healthz close.")
	}, nodeHealthzRetryInterval, wait.NeverStop)
}

type healthzHandler struct {
	hs *HealthzServer
}

func (h healthzHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
	lastUpdated := time.Time{}
	if val := h.hs.lastUpdated.Load(); val != nil {
		lastUpdated = val.(time.Time)
	}
	currentTime := h.hs.clock.Now()

	resp.Header().Set("Content-Type", "application/json")
	if !lastUpdated.IsZero() && currentTime.After(lastUpdated.Add(h.hs.healthTimeout)) {
		resp.WriteHeader(http.StatusServiceUnavailable)
	} else {
		resp.WriteHeader(http.StatusOK)
	}
	fmt.Fprintf(resp, `{"lastUpdated": %q,"currentTime": %q}`, lastUpdated, currentTime)
}
405
vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck_test.go
generated
vendored
Normal file
@ -0,0 +1,405 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package healthcheck
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
type fakeListener struct {
|
||||
openPorts sets.String
|
||||
}
|
||||
|
||||
func newFakeListener() *fakeListener {
|
||||
return &fakeListener{
|
||||
openPorts: sets.String{},
|
||||
}
|
||||
}
|
||||
|
||||
func (fake *fakeListener) hasPort(addr string) bool {
|
||||
return fake.openPorts.Has(addr)
|
||||
}
|
||||
|
||||
func (fake *fakeListener) Listen(addr string) (net.Listener, error) {
|
||||
fake.openPorts.Insert(addr)
|
||||
return &fakeNetListener{
|
||||
parent: fake,
|
||||
addr: addr,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type fakeNetListener struct {
|
||||
parent *fakeListener
|
||||
addr string
|
||||
}
|
||||
|
||||
func (fake *fakeNetListener) Accept() (net.Conn, error) {
|
||||
// Not implemented
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (fake *fakeNetListener) Close() error {
|
||||
fake.parent.openPorts.Delete(fake.addr)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fake *fakeNetListener) Addr() net.Addr {
|
||||
// Not implemented
|
||||
return nil
|
||||
}
|
||||
|
||||
type fakeHTTPServerFactory struct{}
|
||||
|
||||
func newFakeHTTPServerFactory() *fakeHTTPServerFactory {
|
||||
return &fakeHTTPServerFactory{}
|
||||
}
|
||||
|
||||
func (fake *fakeHTTPServerFactory) New(addr string, handler http.Handler) HTTPServer {
|
||||
return &fakeHTTPServer{
|
||||
addr: addr,
|
||||
handler: handler,
|
||||
}
|
||||
}
|
||||
|
||||
type fakeHTTPServer struct {
|
||||
addr string
|
||||
handler http.Handler
|
||||
}
|
||||
|
||||
func (fake *fakeHTTPServer) Serve(listener net.Listener) error {
|
||||
return nil // Cause the goroutine to return
|
||||
}
|
||||
|
||||
func mknsn(ns, name string) types.NamespacedName {
|
||||
return types.NamespacedName{
|
||||
Namespace: ns,
|
||||
Name: name,
|
||||
}
|
||||
}
|
||||
|
||||
type hcPayload struct {
|
||||
Service struct {
|
||||
Namespace string
|
||||
Name string
|
||||
}
|
||||
LocalEndpoints int
|
||||
}
|
||||
|
||||
type healthzPayload struct {
|
||||
LastUpdated string
|
||||
CurrentTime string
|
||||
}
|
||||
|
||||
func TestServer(t *testing.T) {
|
||||
listener := newFakeListener()
|
||||
httpFactory := newFakeHTTPServerFactory()
|
||||
|
||||
hcsi := NewServer("hostname", nil, listener, httpFactory)
|
||||
hcs := hcsi.(*server)
|
||||
if len(hcs.services) != 0 {
|
||||
t.Errorf("expected 0 services, got %d", len(hcs.services))
|
||||
}
|
||||
|
||||
// sync nothing
|
||||
hcs.SyncServices(nil)
|
||||
if len(hcs.services) != 0 {
|
||||
t.Errorf("expected 0 services, got %d", len(hcs.services))
|
||||
}
|
||||
hcs.SyncEndpoints(nil)
|
||||
if len(hcs.services) != 0 {
|
||||
t.Errorf("expected 0 services, got %d", len(hcs.services))
|
||||
}
|
||||
|
||||
// sync unknown endpoints, should be dropped
|
||||
hcs.SyncEndpoints(map[types.NamespacedName]int{mknsn("a", "b"): 93})
|
||||
if len(hcs.services) != 0 {
|
||||
t.Errorf("expected 0 services, got %d", len(hcs.services))
|
||||
}
|
||||
|
||||
// sync a real service
|
||||
nsn := mknsn("a", "b")
|
||||
hcs.SyncServices(map[types.NamespacedName]uint16{nsn: 9376})
|
||||
if len(hcs.services) != 1 {
|
||||
t.Errorf("expected 1 service, got %d", len(hcs.services))
|
||||
}
|
||||
if hcs.services[nsn].endpoints != 0 {
|
||||
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn].endpoints)
|
||||
}
|
||||
if len(listener.openPorts) != 1 {
|
||||
t.Errorf("expected 1 open port, got %d\n%s", len(listener.openPorts), spew.Sdump(listener.openPorts))
|
||||
}
|
||||
if !listener.hasPort(":9376") {
|
||||
t.Errorf("expected port :9376 to be open\n%s", spew.Sdump(listener.openPorts))
|
||||
}
|
||||
// test the handler
|
||||
testHandler(hcs, nsn, http.StatusServiceUnavailable, 0, t)
|
||||
|
||||
// sync an endpoint
|
||||
hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 18})
|
||||
if len(hcs.services) != 1 {
|
||||
t.Errorf("expected 1 service, got %d", len(hcs.services))
|
||||
}
|
||||
if hcs.services[nsn].endpoints != 18 {
|
||||
t.Errorf("expected 18 endpoints, got %d", hcs.services[nsn].endpoints)
|
||||
}
|
||||
// test the handler
|
||||
testHandler(hcs, nsn, http.StatusOK, 18, t)
|
||||
|
||||
// sync zero endpoints
|
||||
hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 0})
|
||||
if len(hcs.services) != 1 {
|
||||
t.Errorf("expected 1 service, got %d", len(hcs.services))
|
||||
}
|
||||
if hcs.services[nsn].endpoints != 0 {
|
||||
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn].endpoints)
|
||||
}
|
||||
// test the handler
|
||||
testHandler(hcs, nsn, http.StatusServiceUnavailable, 0, t)
|
||||
|
||||
// put the endpoint back
|
||||
hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 11})
|
||||
if len(hcs.services) != 1 {
|
||||
t.Errorf("expected 1 service, got %d", len(hcs.services))
|
||||
}
|
||||
if hcs.services[nsn].endpoints != 11 {
|
||||
t.Errorf("expected 18 endpoints, got %d", hcs.services[nsn].endpoints)
|
||||
}
|
||||
// sync nil endpoints
|
||||
hcs.SyncEndpoints(nil)
|
||||
if len(hcs.services) != 1 {
|
||||
t.Errorf("expected 1 service, got %d", len(hcs.services))
|
||||
}
|
||||
if hcs.services[nsn].endpoints != 0 {
|
||||
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn].endpoints)
|
||||
}
|
||||
// test the handler
|
||||
testHandler(hcs, nsn, http.StatusServiceUnavailable, 0, t)
|
||||
|
||||
// put the endpoint back
|
||||
hcs.SyncEndpoints(map[types.NamespacedName]int{nsn: 18})
|
||||
if len(hcs.services) != 1 {
|
||||
t.Errorf("expected 1 service, got %d", len(hcs.services))
|
||||
}
|
||||
if hcs.services[nsn].endpoints != 18 {
|
||||
t.Errorf("expected 18 endpoints, got %d", hcs.services[nsn].endpoints)
|
||||
}
|
||||
// delete the service
|
||||
hcs.SyncServices(nil)
|
||||
if len(hcs.services) != 0 {
|
||||
t.Errorf("expected 0 services, got %d", len(hcs.services))
|
||||
}
|
||||
|
||||
// sync multiple services
|
||||
nsn1 := mknsn("a", "b")
|
||||
nsn2 := mknsn("c", "d")
|
||||
nsn3 := mknsn("e", "f")
|
||||
nsn4 := mknsn("g", "h")
|
||||
hcs.SyncServices(map[types.NamespacedName]uint16{
|
||||
nsn1: 9376,
|
||||
nsn2: 12909,
|
||||
nsn3: 11113,
|
||||
})
|
||||
if len(hcs.services) != 3 {
|
||||
t.Errorf("expected 3 service, got %d", len(hcs.services))
|
||||
}
|
||||
if hcs.services[nsn1].endpoints != 0 {
|
||||
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn1].endpoints)
|
||||
}
|
||||
if hcs.services[nsn2].endpoints != 0 {
|
||||
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn2].endpoints)
|
||||
}
|
||||
if hcs.services[nsn3].endpoints != 0 {
|
||||
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn3].endpoints)
|
||||
}
|
||||
if len(listener.openPorts) != 3 {
|
||||
t.Errorf("expected 3 open ports, got %d\n%s", len(listener.openPorts), spew.Sdump(listener.openPorts))
|
||||
}
|
||||
// test the handlers
|
||||
testHandler(hcs, nsn1, http.StatusServiceUnavailable, 0, t)
|
||||
testHandler(hcs, nsn2, http.StatusServiceUnavailable, 0, t)
|
||||
testHandler(hcs, nsn3, http.StatusServiceUnavailable, 0, t)
|
||||
|
||||
// sync endpoints
|
||||
hcs.SyncEndpoints(map[types.NamespacedName]int{
|
||||
nsn1: 9,
|
||||
nsn2: 3,
|
||||
nsn3: 7,
|
||||
})
|
||||
if len(hcs.services) != 3 {
|
||||
t.Errorf("expected 3 services, got %d", len(hcs.services))
|
||||
}
|
||||
if hcs.services[nsn1].endpoints != 9 {
|
||||
t.Errorf("expected 9 endpoints, got %d", hcs.services[nsn1].endpoints)
|
||||
}
|
||||
if hcs.services[nsn2].endpoints != 3 {
|
||||
t.Errorf("expected 3 endpoints, got %d", hcs.services[nsn2].endpoints)
|
||||
}
|
||||
if hcs.services[nsn3].endpoints != 7 {
|
||||
t.Errorf("expected 7 endpoints, got %d", hcs.services[nsn3].endpoints)
|
||||
}
|
||||
// test the handlers
|
||||
testHandler(hcs, nsn1, http.StatusOK, 9, t)
|
||||
testHandler(hcs, nsn2, http.StatusOK, 3, t)
|
||||
testHandler(hcs, nsn3, http.StatusOK, 7, t)
|
||||
|
||||
// sync new services
|
||||
hcs.SyncServices(map[types.NamespacedName]uint16{
|
||||
//nsn1: 9376, // remove it
|
||||
nsn2: 12909, // leave it
|
||||
nsn3: 11114, // change it
|
||||
nsn4: 11878, // add it
|
||||
})
|
||||
if len(hcs.services) != 3 {
|
||||
t.Errorf("expected 3 service, got %d", len(hcs.services))
|
||||
}
|
||||
if hcs.services[nsn2].endpoints != 3 {
|
||||
t.Errorf("expected 3 endpoints, got %d", hcs.services[nsn2].endpoints)
|
||||
}
|
||||
if hcs.services[nsn3].endpoints != 0 {
|
||||
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn3].endpoints)
|
||||
}
|
||||
if hcs.services[nsn4].endpoints != 0 {
|
||||
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn4].endpoints)
|
||||
}
|
||||
// test the handlers
|
||||
testHandler(hcs, nsn2, http.StatusOK, 3, t)
|
||||
testHandler(hcs, nsn3, http.StatusServiceUnavailable, 0, t)
|
||||
testHandler(hcs, nsn4, http.StatusServiceUnavailable, 0, t)
|
||||
|
||||
// sync endpoints
|
||||
hcs.SyncEndpoints(map[types.NamespacedName]int{
|
||||
nsn1: 9,
|
||||
nsn2: 3,
|
||||
nsn3: 7,
|
||||
nsn4: 6,
|
||||
})
|
||||
if len(hcs.services) != 3 {
|
||||
t.Errorf("expected 3 services, got %d", len(hcs.services))
|
||||
}
|
||||
if hcs.services[nsn2].endpoints != 3 {
|
||||
t.Errorf("expected 3 endpoints, got %d", hcs.services[nsn2].endpoints)
|
||||
}
|
||||
if hcs.services[nsn3].endpoints != 7 {
|
||||
t.Errorf("expected 7 endpoints, got %d", hcs.services[nsn3].endpoints)
|
||||
}
|
||||
if hcs.services[nsn4].endpoints != 6 {
|
||||
t.Errorf("expected 6 endpoints, got %d", hcs.services[nsn4].endpoints)
|
||||
}
|
||||
// test the handlers
|
||||
testHandler(hcs, nsn2, http.StatusOK, 3, t)
|
||||
testHandler(hcs, nsn3, http.StatusOK, 7, t)
|
||||
testHandler(hcs, nsn4, http.StatusOK, 6, t)
|
||||
|
||||
// sync endpoints, missing nsn2
|
||||
hcs.SyncEndpoints(map[types.NamespacedName]int{
|
||||
nsn3: 7,
|
||||
nsn4: 6,
|
||||
})
|
||||
if len(hcs.services) != 3 {
|
||||
t.Errorf("expected 3 services, got %d", len(hcs.services))
|
||||
}
|
||||
if hcs.services[nsn2].endpoints != 0 {
|
||||
t.Errorf("expected 0 endpoints, got %d", hcs.services[nsn2].endpoints)
|
||||
}
|
||||
if hcs.services[nsn3].endpoints != 7 {
|
||||
t.Errorf("expected 7 endpoints, got %d", hcs.services[nsn3].endpoints)
|
||||
}
|
||||
if hcs.services[nsn4].endpoints != 6 {
|
||||
t.Errorf("expected 6 endpoints, got %d", hcs.services[nsn4].endpoints)
|
||||
}
|
||||
// test the handlers
|
||||
testHandler(hcs, nsn2, http.StatusServiceUnavailable, 0, t)
|
||||
testHandler(hcs, nsn3, http.StatusOK, 7, t)
|
||||
testHandler(hcs, nsn4, http.StatusOK, 6, t)
|
||||
}
|
||||
|
||||
func testHandler(hcs *server, nsn types.NamespacedName, status int, endpoints int, t *testing.T) {
|
||||
handler := hcs.services[nsn].server.(*fakeHTTPServer).handler
|
||||
req, err := http.NewRequest("GET", "/healthz", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp := httptest.NewRecorder()
|
||||
|
||||
handler.ServeHTTP(resp, req)
|
||||
|
||||
if resp.Code != status {
|
||||
t.Errorf("expected status code %v, got %v", status, resp.Code)
|
||||
}
|
||||
var payload hcPayload
|
||||
if err := json.Unmarshal(resp.Body.Bytes(), &payload); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if payload.Service.Name != nsn.Name || payload.Service.Namespace != nsn.Namespace {
|
||||
t.Errorf("expected payload name %q, got %v", nsn.String(), payload.Service)
|
||||
}
|
||||
if payload.LocalEndpoints != endpoints {
|
||||
t.Errorf("expected %d endpoints, got %d", endpoints, payload.LocalEndpoints)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHealthzServer(t *testing.T) {
|
||||
listener := newFakeListener()
|
||||
httpFactory := newFakeHTTPServerFactory()
|
||||
fakeClock := clock.NewFakeClock(time.Now())
|
||||
|
||||
hs := newHealthzServer(listener, httpFactory, fakeClock, "127.0.0.1:10256", 10*time.Second, nil, nil)
|
||||
server := hs.httpFactory.New(hs.addr, healthzHandler{hs: hs})
|
||||
|
||||
// Should return 200 "OK" by default.
|
||||
testHealthzHandler(server, http.StatusOK, t)
|
||||
|
||||
// Should return 503 "ServiceUnavailable" if the max no-response duration is exceeded.
|
||||
hs.UpdateTimestamp()
|
||||
fakeClock.Step(25 * time.Second)
|
||||
testHealthzHandler(server, http.StatusServiceUnavailable, t)
|
||||
|
||||
// Should return 200 "OK" if timestamp is valid.
|
||||
hs.UpdateTimestamp()
|
||||
fakeClock.Step(5 * time.Second)
|
||||
testHealthzHandler(server, http.StatusOK, t)
|
||||
}
|
||||
|
||||
func testHealthzHandler(server HTTPServer, status int, t *testing.T) {
|
||||
handler := server.(*fakeHTTPServer).handler
|
||||
req, err := http.NewRequest("GET", "/healthz", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp := httptest.NewRecorder()
|
||||
|
||||
handler.ServeHTTP(resp, req)
|
||||
|
||||
if resp.Code != status {
|
||||
t.Errorf("expected status code %v, got %v", status, resp.Code)
|
||||
}
|
||||
var payload healthzPayload
|
||||
if err := json.Unmarshal(resp.Body.Bytes(), &payload); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
73
vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD
generated
vendored
Normal file
@ -0,0 +1,73 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"proxier.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/proxy/iptables",
|
||||
deps = [
|
||||
"//pkg/api/service:go_default_library",
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/core/helper:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/proxy:go_default_library",
|
||||
"//pkg/proxy/healthcheck:go_default_library",
|
||||
"//pkg/proxy/metrics:go_default_library",
|
||||
"//pkg/proxy/util:go_default_library",
|
||||
"//pkg/util/async:go_default_library",
|
||||
"//pkg/util/iptables:go_default_library",
|
||||
"//pkg/util/sysctl:go_default_library",
|
||||
"//pkg/util/version:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["proxier_test.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/proxy/iptables",
|
||||
library = ":go_default_library",
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/proxy:go_default_library",
|
||||
"//pkg/proxy/util:go_default_library",
|
||||
"//pkg/util/async:go_default_library",
|
||||
"//pkg/util/iptables:go_default_library",
|
||||
"//pkg/util/iptables/testing:go_default_library",
|
||||
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec/testing:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
6
vendor/k8s.io/kubernetes/pkg/proxy/iptables/OWNERS
generated
vendored
Executable file
@ -0,0 +1,6 @@
|
||||
reviewers:
|
||||
- thockin
|
||||
- smarterclayton
|
||||
- justinsb
|
||||
- freehan
|
||||
- dcbw
|
1756
vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go
generated
vendored
Normal file
File diff suppressed because it is too large
2498
vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
98
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/BUILD
generated
vendored
Normal file
@ -0,0 +1,98 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"ipset_test.go",
|
||||
"proxier_test.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/proxy/ipvs",
|
||||
library = ":go_default_library",
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/proxy:go_default_library",
|
||||
"//pkg/proxy/ipvs/testing:go_default_library",
|
||||
"//pkg/proxy/util:go_default_library",
|
||||
"//pkg/util/ipset:go_default_library",
|
||||
"//pkg/util/ipset/testing:go_default_library",
|
||||
"//pkg/util/iptables:go_default_library",
|
||||
"//pkg/util/iptables/testing:go_default_library",
|
||||
"//pkg/util/ipvs:go_default_library",
|
||||
"//pkg/util/ipvs/testing:go_default_library",
|
||||
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec/testing:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"ipset.go",
|
||||
"netlink.go",
|
||||
"netlink_unsupported.go",
|
||||
"proxier.go",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:linux_amd64": [
|
||||
"netlink_linux.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
importpath = "k8s.io/kubernetes/pkg/proxy/ipvs",
|
||||
deps = [
|
||||
"//pkg/api/service:go_default_library",
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/core/helper:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/proxy:go_default_library",
|
||||
"//pkg/proxy/healthcheck:go_default_library",
|
||||
"//pkg/proxy/metrics:go_default_library",
|
||||
"//pkg/proxy/util:go_default_library",
|
||||
"//pkg/util/async:go_default_library",
|
||||
"//pkg/util/ipset:go_default_library",
|
||||
"//pkg/util/iptables:go_default_library",
|
||||
"//pkg/util/ipvs:go_default_library",
|
||||
"//pkg/util/sysctl:go_default_library",
|
||||
"//pkg/util/version:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:linux_amd64": [
|
||||
"//vendor/github.com/vishvananda/netlink:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//pkg/proxy/ipvs/testing:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
)
|
75
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/README.md
generated
vendored
Normal file
@ -0,0 +1,75 @@
|
||||
# How to use IPVS
|
||||
|
||||
This document shows how to use kube-proxy ipvs mode.
|
||||
|
||||
## What is IPVS
|
||||
|
||||
**IPVS (IP Virtual Server)** implements transport-layer load balancing, usually called Layer 4 LAN switching, as part of
|
||||
the Linux kernel.
|
||||
|
||||
IPVS runs on a host and acts as a load balancer in front of a cluster of real servers. IPVS can direct requests for TCP
|
||||
and UDP-based services to the real servers, and make services of the real servers appear as virtual services on a single IP address.
|
||||
|
||||
## How to use
|
||||
|
||||
#### Load IPVS kernel modules
|
||||
|
||||
Currently the IPVS kernel modules can't be loaded automatically, so first we should use the following commands to load the IPVS kernel
|
||||
modules manually.
|
||||
|
||||
```shell
|
||||
modprobe ip_vs
|
||||
modprobe ip_vs_rr
|
||||
modprobe ip_vs_wrr
|
||||
modprobe ip_vs_sh
|
||||
modprobe nf_conntrack_ipv4
|
||||
```
|
||||
|
||||
After that, use `lsmod | grep ip_vs` to make sure the kernel modules are loaded.
|
||||
|
||||
### Run kube-proxy in ipvs mode
|
||||
|
||||
#### Local UP Cluster
|
||||
|
||||
Kube-proxy will run in iptables mode by default in a [local-up cluster](https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md).
|
||||
|
||||
To run kube-proxy in ipvs mode, export the env variable `KUBEPROXY_MODE=ipvs` before deploying the cluster.
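
For example, to bring up a local cluster with kube-proxy in ipvs mode (an illustrative sketch; assumes you are at the root of a kubernetes checkout):

```shell
export KUBEPROXY_MODE=ipvs
hack/local-up-cluster.sh
```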
|
||||
|
||||
#### Cluster Created by Kubeadm
|
||||
|
||||
Kube-proxy will run in iptables mode by default in a cluster deployed by [kubeadm](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/).
|
||||
|
||||
Since IPVS mode is still feature-gated, users should add the flag `--feature-gates=SupportIPVSProxyMode=true` to the `kubeadm init` command
|
||||
|
||||
```
|
||||
kubeadm init --feature-gates=SupportIPVSProxyMode=true
|
||||
```
|
||||
|
||||
to enable the ipvs mode before deploying the cluster.
|
||||
|
||||
If you are using kubeadm with a configuration file, you can specify the ipvs mode by adding `SupportIPVSProxyMode: true` under the `featureGates` field.
|
||||
The configuration file then looks similar to:
|
||||
|
||||
```yaml
|
||||
kind: MasterConfiguration
|
||||
apiVersion: kubeadm.k8s.io/v1alpha1
|
||||
...
|
||||
featureGates:
|
||||
SupportIPVSProxyMode: true
|
||||
...
|
||||
```
|
||||
|
||||
#### Test
|
||||
|
||||
Use the `ipvsadm` tool to check whether kube-proxy started successfully. By default we should see output like:
|
||||
|
||||
```shell
|
||||
# ipvsadm -ln
|
||||
IP Virtual Server version 1.2.1 (size=4096)
|
||||
Prot LocalAddress:Port Scheduler Flags
|
||||
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
|
||||
TCP 10.0.0.1:443 rr persistent 10800
|
||||
-> 10.229.43.2:6443 Masq 1 0 0
|
||||
TCP 10.0.0.10:53 rr
|
||||
UDP 10.0.0.10:53 rr
|
||||
```
|
157
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/ipset.go
generated
vendored
Normal file
@ -0,0 +1,157 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ipvs
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
utilipset "k8s.io/kubernetes/pkg/util/ipset"
|
||||
utilversion "k8s.io/kubernetes/pkg/util/version"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const (
|
||||
// MinIPSetCheckVersion is the min ipset version we need. IPv6 is supported in ipset 6.x
|
||||
MinIPSetCheckVersion = "6.0"
|
||||
|
||||
// KubeLoopBackIPSet is used to store endpoint "dst ip:port, source ip" tuples for solving the hairpin problem.
|
||||
KubeLoopBackIPSet = "KUBE-LOOP-BACK"
|
||||
|
||||
// KubeClusterIPSet is used to store service cluster ip + port for masquerade purpose.
|
||||
KubeClusterIPSet = "KUBE-CLUSTER-IP"
|
||||
|
||||
// KubeExternalIPSet is used to store service external ip + port for masquerade and filter purpose.
|
||||
KubeExternalIPSet = "KUBE-EXTERNAL-IP"
|
||||
|
||||
// KubeLoadBalancerSet is used to store service load balancer ingress ip + port, it is the service lb portal.
|
||||
KubeLoadBalancerSet = "KUBE-LOAD-BALANCER"
|
||||
|
||||
// KubeLoadBalancerMasqSet is used to store service load balancer ingress ip + port for masquerade purpose.
|
||||
KubeLoadBalancerMasqSet = "KUBE-LOAD-BALANCER-MASQ"
|
||||
|
||||
// KubeLoadBalancerSourceIPSet is used to store service load balancer ingress ip + port + source IP for packet filter purpose.
|
||||
KubeLoadBalancerSourceIPSet = "KUBE-LOAD-BALANCER-SOURCE-IP"
|
||||
|
||||
// KubeLoadBalancerSourceCIDRSet is used to store service load balancer ingress ip + port + source cidr for packet filter purpose.
|
||||
KubeLoadBalancerSourceCIDRSet = "KUBE-LOAD-BALANCER-SOURCE-CIDR"
|
||||
|
||||
// KubeNodePortSetTCP is used to store nodeport TCP port for masquerade purpose.
|
||||
KubeNodePortSetTCP = "KUBE-NODE-PORT-TCP"
|
||||
|
||||
// KubeNodePortSetUDP is used to store nodeport UDP port for masquerade purpose.
|
||||
KubeNodePortSetUDP = "KUBE-NODE-PORT-UDP"
|
||||
)
|
||||
|
||||
// IPSetVersioner can query the current ipset version.
|
||||
type IPSetVersioner interface {
|
||||
// returns "X.Y"
|
||||
GetVersion() (string, error)
|
||||
}
|
||||
|
||||
// IPSet wraps util/ipset which is used by IPVS proxier.
|
||||
type IPSet struct {
|
||||
utilipset.IPSet
|
||||
// activeEntries is the current active entries of the ipset.
|
||||
activeEntries sets.String
|
||||
// handle is the util ipset interface handle.
|
||||
handle utilipset.Interface
|
||||
}
|
||||
|
||||
// NewIPSet initializes a new IPSet struct.
|
||||
func NewIPSet(handle utilipset.Interface, name string, setType utilipset.Type, isIPv6 bool) *IPSet {
|
||||
hashFamily := utilipset.ProtocolFamilyIPV4
|
||||
if isIPv6 {
|
||||
hashFamily = utilipset.ProtocolFamilyIPV6
|
||||
}
|
||||
set := &IPSet{
|
||||
IPSet: utilipset.IPSet{
|
||||
Name: name,
|
||||
SetType: setType,
|
||||
HashFamily: hashFamily,
|
||||
},
|
||||
activeEntries: sets.NewString(),
|
||||
handle: handle,
|
||||
}
|
||||
return set
|
||||
}
|
||||
|
||||
func (set *IPSet) isEmpty() bool {
|
||||
return len(set.activeEntries.UnsortedList()) == 0
|
||||
}
|
||||
|
||||
func (set *IPSet) resetEntries() {
|
||||
set.activeEntries = sets.NewString()
|
||||
}
|
||||
|
||||
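// syncIPSetEntries reconciles the kernel's entries for this set with
// activeEntries: entries applied in the kernel but absent from activeEntries
// are deleted, and active entries missing from the kernel are added.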
func (set *IPSet) syncIPSetEntries() {
|
||||
appliedEntries, err := set.handle.ListEntries(set.Name)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to list ip set entries, error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// currentIPSetEntries represents Endpoints watched from API Server.
|
||||
currentIPSetEntries := sets.NewString()
|
||||
for _, appliedEntry := range appliedEntries {
|
||||
currentIPSetEntries.Insert(appliedEntry)
|
||||
}
|
||||
|
||||
if !set.activeEntries.Equal(currentIPSetEntries) {
|
||||
// Clean legacy entries
|
||||
for _, entry := range currentIPSetEntries.Difference(set.activeEntries).List() {
|
||||
if err := set.handle.DelEntry(entry, set.Name); err != nil {
|
||||
glog.Errorf("Failed to delete ip set entry: %s from ip set: %s, error: %v", entry, set.Name, err)
|
||||
} else {
|
||||
glog.V(3).Infof("Successfully delete legacy ip set entry: %s from ip set: %s", entry, set.Name)
|
||||
}
|
||||
}
|
||||
// Create active entries
|
||||
for _, entry := range set.activeEntries.Difference(currentIPSetEntries).List() {
|
||||
if err := set.handle.AddEntry(entry, set.Name, true); err != nil {
|
||||
glog.Errorf("Failed to add entry: %v to ip set: %s, error: %v", entry, set.Name, err)
|
||||
} else {
|
||||
glog.V(3).Infof("Successfully add entry: %v to ip set: %s", entry, set.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ensureIPSets(ipSets ...*IPSet) error {
|
||||
for _, set := range ipSets {
|
||||
if err := set.handle.CreateSet(&set.IPSet, true); err != nil {
|
||||
glog.Errorf("Failed to make sure ip set: %v exist, error: %v", set, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkMinVersion checks whether the current ipset version satisfies the required minimum version.
|
||||
func checkMinVersion(vstring string) bool {
|
||||
version, err := utilversion.ParseGeneric(vstring)
|
||||
if err != nil {
|
||||
glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err)
|
||||
return false
|
||||
}
|
||||
|
||||
minVersion, err := utilversion.ParseGeneric(MinIPSetCheckVersion)
|
||||
if err != nil {
|
||||
glog.Errorf("MinCheckVersion (%s) is not a valid version string: %v", MinIPSetCheckVersion, err)
|
||||
return false
|
||||
}
|
||||
return !version.LessThan(minVersion)
|
||||
}
|
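// For example (per the test table in ipset_test.go below), checkMinVersion("6.19")
// and checkMinVersion("7.0") return true, while checkMinVersion("5.1.2") and the
// unparseable "total junk" return false.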
49
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/ipset_test.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ipvs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCheckIPSetVersion(t *testing.T) {
|
||||
testCases := []struct {
|
||||
vstring string
|
||||
valid bool
|
||||
}{
|
||||
// version less than "6.0" is not valid.
|
||||
{"4.0", false},
|
||||
{"5.1", false},
|
||||
{"5.1.2", false},
|
||||
// "7" is not a valid version string.
|
||||
{"7", false},
|
||||
{"6.0", true},
|
||||
{"6.1", true},
|
||||
{"6.19", true},
|
||||
{"7.0", true},
|
||||
{"8.1.2", true},
|
||||
{"9.3.4.0", true},
|
||||
{"total junk", false},
|
||||
}
|
||||
|
||||
for i := range testCases {
|
||||
valid := checkMinVersion(testCases[i].vstring)
|
||||
if testCases[i].valid != valid {
|
||||
t.Errorf("Expected result: %v, Got result: %v", testCases[i].valid, valid)
|
||||
}
|
||||
}
|
||||
}
|
29
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ipvs
|
||||
|
||||
// NetLinkHandle is an interface for invoking netlink operations.
|
||||
type NetLinkHandle interface {
|
||||
// EnsureAddressBind checks if address is bound to the interface and, if not, binds it. If the address is already bound, return true.
|
||||
EnsureAddressBind(address, devName string) (exist bool, err error)
|
||||
// UnbindAddress unbinds the address from the interface.
|
||||
UnbindAddress(address, devName string) error
|
||||
// EnsureDummyDevice checks if the dummy device exists and, if not, creates one. If the dummy device already exists, return true.
|
||||
EnsureDummyDevice(devName string) (exist bool, err error)
|
||||
// DeleteDummyDevice deletes the given dummy device by name.
|
||||
DeleteDummyDevice(devName string) error
|
||||
}
|
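// Example usage (a sketch; "kube-ipvs0" is the dummy device name the IPVS
// proxier conventionally uses, and the VIP shown is purely illustrative):
//
//	handle := NewNetLinkHandle()
//	if _, err := handle.EnsureDummyDevice("kube-ipvs0"); err == nil {
//		// Bind a service VIP so IPVS can accept traffic for it.
//		_, _ = handle.EnsureAddressBind("10.0.0.1", "kube-ipvs0")
//	}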
98
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_linux.go
generated
vendored
Normal file
@ -0,0 +1,98 @@
|
||||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ipvs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"syscall"
|
||||
|
||||
"github.com/vishvananda/netlink"
|
||||
)
|
||||
|
||||
type netlinkHandle struct {
|
||||
netlink.Handle
|
||||
}
|
||||
|
||||
// NewNetLinkHandle will create a new netlinkHandle
|
||||
func NewNetLinkHandle() NetLinkHandle {
|
||||
return &netlinkHandle{netlink.Handle{}}
|
||||
}
|
||||
|
||||
// EnsureAddressBind checks if address is bound to the interface and, if not, binds it. If the address is already bound, return true.
|
||||
func (h *netlinkHandle) EnsureAddressBind(address, devName string) (exist bool, err error) {
|
||||
dev, err := h.LinkByName(devName)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error get interface: %s, err: %v", devName, err)
|
||||
}
|
||||
addr := net.ParseIP(address)
|
||||
if addr == nil {
|
||||
return false, fmt.Errorf("error parse ip address: %s", address)
|
||||
}
|
||||
if err := h.AddrAdd(dev, &netlink.Addr{IPNet: netlink.NewIPNet(addr)}); err != nil {
|
||||
// "EEXIST" will be returned if the address is already bound to device
|
||||
if err == syscall.Errno(syscall.EEXIST) {
|
||||
return true, nil
|
||||
}
|
||||
return false, fmt.Errorf("error bind address: %s to interface: %s, err: %v", address, devName, err)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// UnbindAddress unbinds the address from the interface.
|
||||
func (h *netlinkHandle) UnbindAddress(address, devName string) error {
|
||||
dev, err := h.LinkByName(devName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error get interface: %s, err: %v", devName, err)
|
||||
}
|
||||
addr := net.ParseIP(address)
|
||||
if addr == nil {
|
||||
return fmt.Errorf("error parse ip address: %s", address)
|
||||
}
|
||||
if err := h.AddrDel(dev, &netlink.Addr{IPNet: netlink.NewIPNet(addr)}); err != nil {
|
||||
return fmt.Errorf("error unbind address: %s from interface: %s, err: %v", address, devName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureDummyDevice is part of the interface.
|
||||
func (h *netlinkHandle) EnsureDummyDevice(devName string) (bool, error) {
|
||||
_, err := h.LinkByName(devName)
|
||||
if err == nil {
|
||||
// found dummy device
|
||||
return true, nil
|
||||
}
|
||||
dummy := &netlink.Dummy{
|
||||
LinkAttrs: netlink.LinkAttrs{Name: devName},
|
||||
}
|
||||
return false, h.LinkAdd(dummy)
|
||||
}
|
||||
|
||||
// DeleteDummyDevice is part of the interface.
|
||||
func (h *netlinkHandle) DeleteDummyDevice(devName string) error {
|
||||
link, err := h.LinkByName(devName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error deleting a non-exist dummy device: %s", devName)
|
||||
}
|
||||
dummy, ok := link.(*netlink.Dummy)
|
||||
if !ok {
|
||||
return fmt.Errorf("expect dummy device, got device type: %s", link.Type())
|
||||
}
|
||||
return h.LinkDel(dummy)
|
||||
}
|
51
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_unsupported.go
generated
vendored
Normal file
@ -0,0 +1,51 @@
|
||||
// +build !linux
|
||||
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ipvs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type emptyHandle struct {
|
||||
}
|
||||
|
||||
// NewNetLinkHandle will create an EmptyHandle
|
||||
func NewNetLinkHandle() NetLinkHandle {
|
||||
return &emptyHandle{}
|
||||
}
|
||||
|
||||
// EnsureAddressBind checks if address is bound to the interface and, if not, binds it. If the address is already bound, return true.
|
||||
func (h *emptyHandle) EnsureAddressBind(address, devName string) (exist bool, err error) {
|
||||
return false, fmt.Errorf("netlink not supported for this platform")
|
||||
}
|
||||
|
||||
// UnbindAddress unbinds the address from the interface.
|
||||
func (h *emptyHandle) UnbindAddress(address, devName string) error {
|
||||
return fmt.Errorf("netlink not supported for this platform")
|
||||
}
|
||||
|
||||
// EnsureDummyDevice is part of the interface.
|
||||
func (h *emptyHandle) EnsureDummyDevice(devName string) (bool, error) {
|
||||
return false, fmt.Errorf("netlink is not supported in this platform")
|
||||
}
|
||||
|
||||
// DeleteDummyDevice is part of the interface.
|
||||
func (h *emptyHandle) DeleteDummyDevice(devName string) error {
|
||||
return fmt.Errorf("netlink is not supported in this platform")
|
||||
}
|
1850
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go
generated
vendored
Normal file
File diff suppressed because it is too large
2250
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
28
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/testing/BUILD
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["fake.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/proxy/ipvs/testing",
|
||||
tags = ["automanaged"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
46
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/testing/fake.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package testing
|
||||
|
||||
// FakeNetlinkHandle is a mock implementation of the proxy NetLinkHandle.
|
||||
type FakeNetlinkHandle struct {
|
||||
}
|
||||
|
||||
// NewFakeNetlinkHandle will create a new FakeNetlinkHandle
|
||||
func NewFakeNetlinkHandle() *FakeNetlinkHandle {
|
||||
return &FakeNetlinkHandle{}
|
||||
}
|
||||
|
||||
// EnsureAddressBind is a mock implementation
|
||||
func (h *FakeNetlinkHandle) EnsureAddressBind(address, devName string) (exist bool, err error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// UnbindAddress is a mock implementation
|
||||
func (h *FakeNetlinkHandle) UnbindAddress(address, devName string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureDummyDevice is a mock implementation
|
||||
func (h *FakeNetlinkHandle) EnsureDummyDevice(devName string) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// DeleteDummyDevice is a mock implementation
|
||||
func (h *FakeNetlinkHandle) DeleteDummyDevice(devName string) error {
|
||||
return nil
|
||||
}
|
23
vendor/k8s.io/kubernetes/pkg/proxy/metrics/BUILD
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["metrics.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/proxy/metrics",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = ["//vendor/github.com/prometheus/client_golang/prometheus:go_default_library"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
52
vendor/k8s.io/kubernetes/pkg/proxy/metrics/metrics.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const kubeProxySubsystem = "kubeproxy"
|
||||
|
||||
var (
|
||||
// SyncProxyRulesLatency is the latency of one round of kube-proxy syncing proxy rules.
|
||||
SyncProxyRulesLatency = prometheus.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Subsystem: kubeProxySubsystem,
|
||||
Name: "sync_proxy_rules_latency_microseconds",
|
||||
Help: "SyncProxyRules latency",
|
||||
Buckets: prometheus.ExponentialBuckets(1000, 2, 15),
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
var registerMetricsOnce sync.Once
|
||||
|
||||
// RegisterMetrics registers sync proxy rules latency metrics
|
||||
func RegisterMetrics() {
|
||||
registerMetricsOnce.Do(func() {
|
||||
prometheus.MustRegister(SyncProxyRulesLatency)
|
||||
})
|
||||
}
|
||||
|
||||
// SinceInMicroseconds gets the time since the specified start in microseconds.
|
||||
func SinceInMicroseconds(start time.Time) float64 {
|
||||
return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
|
||||
}
|
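// Example (a minimal sketch of the intended call pattern; the surrounding
// proxier code is elided and shown only as a comment):
//
//	metrics.RegisterMetrics()
//	start := time.Now()
//	// ... sync proxy rules ...
//	metrics.SyncProxyRulesLatency.Observe(metrics.SinceInMicroseconds(start))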
44
vendor/k8s.io/kubernetes/pkg/proxy/types.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// ProxyProvider is the interface provided by proxier implementations.
|
||||
type ProxyProvider interface {
|
||||
// Sync immediately synchronizes the ProxyProvider's current state to iptables.
|
||||
Sync()
|
||||
// SyncLoop runs periodic work.
|
||||
// This is expected to run as a goroutine or as the main loop of the app.
|
||||
// It does not return.
|
||||
SyncLoop()
|
||||
}
|
||||
|
||||
// ServicePortName carries a namespace + name + portname. This is the unique
|
||||
// identifier for a load-balanced service.
|
||||
type ServicePortName struct {
|
||||
types.NamespacedName
|
||||
Port string
|
||||
}
|
||||
|
||||
func (spn ServicePortName) String() string {
|
||||
return fmt.Sprintf("%s:%s", spn.NamespacedName.String(), spn.Port)
|
||||
}
|
78
vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD
generated
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"loadbalancer.go",
|
||||
"port_allocator.go",
|
||||
"proxier.go",
|
||||
"proxysocket.go",
|
||||
"rlimit.go",
|
||||
"roundrobin.go",
|
||||
"udp_server.go",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:windows_amd64": [
|
||||
"rlimit_windows.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
importpath = "k8s.io/kubernetes/pkg/proxy/userspace",
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/core/helper:go_default_library",
|
||||
"//pkg/proxy:go_default_library",
|
||||
"//pkg/proxy/util:go_default_library",
|
||||
"//pkg/util/iptables:go_default_library",
|
||||
"//pkg/util/slice:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"port_allocator_test.go",
|
||||
"proxier_test.go",
|
||||
"roundrobin_test.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/proxy/userspace",
|
||||
library = ":go_default_library",
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/proxy:go_default_library",
|
||||
"//pkg/util/iptables/testing:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec/testing:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
5
vendor/k8s.io/kubernetes/pkg/proxy/userspace/OWNERS
generated
vendored
Executable file
@ -0,0 +1,5 @@
|
||||
reviewers:
|
||||
- thockin
|
||||
- lavalamp
|
||||
- smarterclayton
|
||||
- freehan
|
34
vendor/k8s.io/kubernetes/pkg/proxy/userspace/loadbalancer.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package userspace
|
||||
|
||||
import (
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/proxy"
|
||||
"net"
|
||||
)
|
||||
|
||||
// LoadBalancer is an interface for distributing incoming requests to service endpoints.
|
||||
type LoadBalancer interface {
|
||||
// NextEndpoint returns the endpoint to handle a request for the given
|
||||
// service-port and source address.
|
||||
NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error)
|
||||
NewService(service proxy.ServicePortName, sessionAffinityType api.ServiceAffinity, stickyMaxAgeSeconds int) error
|
||||
DeleteService(service proxy.ServicePortName)
|
||||
CleanupStaleStickySessions(service proxy.ServicePortName)
|
||||
ServiceHasEndpoints(service proxy.ServicePortName) bool
|
||||
}
|
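// Caller sketch (illustrative only; "lb" is any LoadBalancer implementation,
// such as the round-robin balancer in roundrobin.go):
//
//	ep, err := lb.NextEndpoint(svcPort, conn.RemoteAddr(), false)
//	if err == nil {
//		// proxy the connection to ep, an "ip:port" string
//	}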
158
vendor/k8s.io/kubernetes/pkg/proxy/userspace/port_allocator.go
generated
vendored
Normal file
@ -0,0 +1,158 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package userspace
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/net"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
)
|
||||
|
||||
var (
|
||||
errPortRangeNoPortsRemaining = errors.New("port allocation failed; there are no remaining ports left to allocate in the accepted range")
|
||||
)
|
||||
|
||||
type PortAllocator interface {
|
||||
AllocateNext() (int, error)
|
||||
Release(int)
|
||||
}
|
||||
|
||||
// randomAllocator is a PortAllocator implementation that allocates random ports, yielding
|
||||
// a port value of 0 for every call to AllocateNext().
|
||||
type randomAllocator struct{}
|
||||
|
||||
// AllocateNext always returns 0
|
||||
func (r *randomAllocator) AllocateNext() (int, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Release is a noop
|
||||
func (r *randomAllocator) Release(_ int) {
|
||||
// noop
|
||||
}
|
||||
|
||||
// newPortAllocator builds PortAllocator for a given PortRange. If the PortRange is empty
|
||||
// then a random port allocator is returned; otherwise, a new range-based allocator
|
||||
// is returned.
|
||||
func newPortAllocator(r net.PortRange) PortAllocator {
|
||||
if r.Base == 0 {
|
||||
return &randomAllocator{}
|
||||
}
|
||||
return newPortRangeAllocator(r, true)
|
||||
}
|
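// Example usage (a sketch; the "36000-36099" range string is arbitrary):
//
//	var pr net.PortRange
//	_ = pr.Set("36000-36099")
//	alloc := newPortAllocator(pr)
//	if port, err := alloc.AllocateNext(); err == nil {
//		defer alloc.Release(port)
//	}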
||||
|
||||
const (
|
||||
portsBufSize = 16
|
||||
nextFreePortCooldown = 500 * time.Millisecond
|
||||
allocateNextTimeout = 1 * time.Second
|
||||
)
|
||||
|
||||
type rangeAllocator struct {
|
||||
net.PortRange
|
||||
ports chan int
|
||||
used big.Int
|
||||
lock sync.Mutex
|
||||
rand *rand.Rand
|
||||
}
|
||||
|
||||
func newPortRangeAllocator(r net.PortRange, autoFill bool) PortAllocator {
|
||||
if r.Base == 0 || r.Size == 0 {
|
||||
panic("illegal argument: may not specify an empty port range")
|
||||
}
|
||||
ra := &rangeAllocator{
|
||||
PortRange: r,
|
||||
ports: make(chan int, portsBufSize),
|
||||
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||
}
|
||||
if autoFill {
|
||||
go wait.Forever(func() { ra.fillPorts() }, nextFreePortCooldown)
|
||||
}
|
||||
return ra
|
||||
}
|
||||
|
||||
// fillPorts loops, always searching for the next free port and, if found, fills the ports buffer with it.
|
||||
// this func blocks unless there are no remaining free ports.
|
||||
func (r *rangeAllocator) fillPorts() {
|
||||
for {
|
||||
if !r.fillPortsOnce() {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *rangeAllocator) fillPortsOnce() bool {
|
||||
port := r.nextFreePort()
|
||||
if port == -1 {
|
||||
return false
|
||||
}
|
||||
r.ports <- port
|
||||
return true
|
||||
}
|
||||
|
||||
// nextFreePort finds a free port, first picking a random port. If that port is already in use,
|
||||
// the port range is scanned sequentially until either a port is found or the scan completes
|
||||
// unsuccessfully. An unsuccessful scan returns a port of -1.
|
||||
func (r *rangeAllocator) nextFreePort() int {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
// choose random port
|
||||
j := r.rand.Intn(r.Size)
|
||||
if b := r.used.Bit(j); b == 0 {
|
||||
r.used.SetBit(&r.used, j, 1)
|
||||
return j + r.Base
|
||||
}
|
||||
|
||||
// search sequentially
|
||||
for i := j + 1; i < r.Size; i++ {
|
||||
if b := r.used.Bit(i); b == 0 {
|
||||
r.used.SetBit(&r.used, i, 1)
|
||||
return i + r.Base
|
||||
}
|
||||
}
|
||||
for i := 0; i < j; i++ {
|
||||
if b := r.used.Bit(i); b == 0 {
|
||||
r.used.SetBit(&r.used, i, 1)
|
||||
return i + r.Base
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func (r *rangeAllocator) AllocateNext() (port int, err error) {
|
||||
select {
|
||||
case port = <-r.ports:
|
||||
case <-time.After(allocateNextTimeout):
|
||||
err = errPortRangeNoPortsRemaining
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *rangeAllocator) Release(port int) {
|
||||
port -= r.Base
|
||||
if port < 0 || port >= r.Size {
|
||||
return
|
||||
}
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
r.used.SetBit(&r.used, port, 0)
|
||||
}
|
178
vendor/k8s.io/kubernetes/pkg/proxy/userspace/port_allocator_test.go
generated
vendored
Normal file
@ -0,0 +1,178 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package userspace
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/net"
|
||||
)
|
||||
|
||||
func TestRangeAllocatorEmpty(t *testing.T) {
|
||||
r := &net.PortRange{}
|
||||
r.Set("0-0")
|
||||
defer func() {
|
||||
if rv := recover(); rv == nil {
|
||||
t.Fatalf("expected panic because of empty port range: %#v", r)
|
||||
}
|
||||
}()
|
||||
_ = newPortRangeAllocator(*r, true)
|
||||
}
|
||||
|
||||
func TestRangeAllocatorFullyAllocated(t *testing.T) {
|
||||
r := &net.PortRange{}
|
||||
r.Set("1-1")
|
||||
// Don't auto-fill ports, we'll manually turn the crank
|
||||
pra := newPortRangeAllocator(*r, false)
|
||||
a := pra.(*rangeAllocator)
|
||||
|
||||
// Fill in the one available port
|
||||
if !a.fillPortsOnce() {
|
||||
t.Fatalf("Expected to be able to fill ports")
|
||||
}
|
||||
|
||||
// There should be no ports available
|
||||
if a.fillPortsOnce() {
|
||||
t.Fatalf("Expected to be unable to fill ports")
|
||||
}
|
||||
|
||||
p, err := a.AllocateNext()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if p != 1 {
|
||||
t.Fatalf("unexpected allocated port: %d", p)
|
||||
}
|
||||
|
||||
a.lock.Lock()
|
||||
if bit := a.used.Bit(p - a.Base); bit != 1 {
|
||||
a.lock.Unlock()
|
||||
t.Fatalf("unexpected used bit for allocated port: %d", p)
|
||||
}
|
||||
a.lock.Unlock()
|
||||
|
||||
_, err = a.AllocateNext()
|
||||
if err == nil {
|
||||
t.Fatalf("expected error because of fully-allocated range")
|
||||
}
|
||||
|
||||
a.Release(p)
|
||||
a.lock.Lock()
|
||||
if bit := a.used.Bit(p - a.Base); bit != 0 {
|
||||
a.lock.Unlock()
|
||||
t.Fatalf("unexpected used bit for allocated port: %d", p)
|
||||
}
|
||||
a.lock.Unlock()
|
||||
|
||||
// Fill in the one available port
|
||||
if !a.fillPortsOnce() {
|
||||
t.Fatalf("Expected to be able to fill ports")
|
||||
}
|
||||
|
||||
p, err = a.AllocateNext()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if p != 1 {
|
||||
t.Fatalf("unexpected allocated port: %d", p)
|
||||
}
|
||||
a.lock.Lock()
|
||||
if bit := a.used.Bit(p - a.Base); bit != 1 {
|
||||
a.lock.Unlock()
|
||||
t.Fatalf("unexpected used bit for allocated port: %d", p)
|
||||
}
|
||||
a.lock.Unlock()
|
||||
|
||||
_, err = a.AllocateNext()
|
||||
if err == nil {
|
||||
t.Fatalf("expected error because of fully-allocated range")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRangeAllocator_RandomishAllocation(t *testing.T) {
	r := &net.PortRange{}
	r.Set("1-100")
	pra := newPortRangeAllocator(*r, false)
	a := pra.(*rangeAllocator)

	// Allocate all the ports.
	var err error
	ports := make([]int, 100)
	for i := 0; i < 100; i++ {
		if !a.fillPortsOnce() {
			t.Fatalf("Expected to be able to fill ports")
		}
		ports[i], err = a.AllocateNext()
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if ports[i] < 1 || ports[i] > 100 {
			t.Fatalf("unexpected allocated port: %d", ports[i])
		}
		a.lock.Lock()
		if bit := a.used.Bit(ports[i] - a.Base); bit != 1 {
			a.lock.Unlock()
			t.Fatalf("unexpected used bit for allocated port: %d", ports[i])
		}
		a.lock.Unlock()
	}

	if a.fillPortsOnce() {
		t.Fatalf("Expected to be unable to fill ports")
	}

	// Release them all.
	for i := 0; i < 100; i++ {
		a.Release(ports[i])
		a.lock.Lock()
		if bit := a.used.Bit(ports[i] - a.Base); bit != 0 {
			a.lock.Unlock()
			t.Fatalf("unexpected used bit for released port: %d", ports[i])
		}
		a.lock.Unlock()
	}

	// Allocate the ports again.
	rports := make([]int, 100)
	for i := 0; i < 100; i++ {
		if !a.fillPortsOnce() {
			t.Fatalf("Expected to be able to fill ports")
		}
		rports[i], err = a.AllocateNext()
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if rports[i] < 1 || rports[i] > 100 {
			t.Fatalf("unexpected allocated port: %d", rports[i])
		}
		a.lock.Lock()
		if bit := a.used.Bit(rports[i] - a.Base); bit != 1 {
			a.lock.Unlock()
			t.Fatalf("unexpected used bit for allocated port: %d", rports[i])
		}
		a.lock.Unlock()
	}

	if a.fillPortsOnce() {
		t.Fatalf("Expected to be unable to fill ports")
	}

	if reflect.DeepEqual(ports, rports) {
		t.Fatalf("expected re-allocated ports to be in a somewhat random order")
	}
}
1108
vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go
generated
vendored
Normal file
File diff suppressed because it is too large
923
vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier_test.go
generated
vendored
Normal file
@ -0,0 +1,923 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package userspace

import (
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"strconv"
	"sync/atomic"
	"testing"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/runtime"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/proxy"
	ipttest "k8s.io/kubernetes/pkg/util/iptables/testing"
	"k8s.io/utils/exec"
	fakeexec "k8s.io/utils/exec/testing"
)

const (
	udpIdleTimeoutForTest = 250 * time.Millisecond
)

func joinHostPort(host string, port int) string {
	return net.JoinHostPort(host, fmt.Sprintf("%d", port))
}

func waitForClosedPortTCP(p *Proxier, proxyPort int) error {
	for i := 0; i < 50; i++ {
		conn, err := net.Dial("tcp", joinHostPort("", proxyPort))
		if err != nil {
			return nil
		}
		conn.Close()
		time.Sleep(1 * time.Millisecond)
	}
	return fmt.Errorf("port %d still open", proxyPort)
}

func waitForClosedPortUDP(p *Proxier, proxyPort int) error {
	for i := 0; i < 50; i++ {
		conn, err := net.Dial("udp", joinHostPort("", proxyPort))
		if err != nil {
			return nil
		}
		conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
		// To detect a closed UDP port, write and then read.
		_, err = conn.Write([]byte("x"))
		if err != nil {
			if e, ok := err.(net.Error); ok && !e.Timeout() {
				return nil
			}
		}
		var buf [4]byte
		_, err = conn.Read(buf[0:])
		if err != nil {
			if e, ok := err.(net.Error); ok && !e.Timeout() {
				return nil
			}
		}
		conn.Close()
		time.Sleep(1 * time.Millisecond)
	}
	return fmt.Errorf("port %d still open", proxyPort)
}

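// Why the write-then-read dance above: UDP has no connection state, so the
// only reliable signal that a port is closed is the ICMP port-unreachable
// response the kernel surfaces as a non-timeout error on a subsequent write
// or read. A plain read timeout proves nothing either way, which is why only
// non-timeout errors count as "closed".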
var tcpServerPort int32
var udpServerPort int32

func TestMain(m *testing.M) {
	// Don't handle panics.
	runtime.ReallyCrash = true

	// TCP setup.
	tcp := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		w.Write([]byte(r.URL.Path[1:]))
	}))
	defer tcp.Close()

	u, err := url.Parse(tcp.URL)
	if err != nil {
		panic(fmt.Sprintf("failed to parse: %v", err))
	}
	_, port, err := net.SplitHostPort(u.Host)
	if err != nil {
		panic(fmt.Sprintf("failed to parse: %v", err))
	}
	tcpServerPortValue, err := strconv.Atoi(port)
	if err != nil {
		panic(fmt.Sprintf("failed to atoi(%s): %v", port, err))
	}
	tcpServerPort = int32(tcpServerPortValue)

	// UDP setup.
	udp, err := newUDPEchoServer()
	if err != nil {
		panic(fmt.Sprintf("failed to make a UDP server: %v", err))
	}
	_, port, err = net.SplitHostPort(udp.LocalAddr().String())
	if err != nil {
		panic(fmt.Sprintf("failed to parse: %v", err))
	}
	udpServerPortValue, err := strconv.Atoi(port)
	if err != nil {
		panic(fmt.Sprintf("failed to atoi(%s): %v", port, err))
	}
	udpServerPort = int32(udpServerPortValue)
	go udp.Loop()

	ret := m.Run()
	// It should be safe to call Close() multiple times.
	tcp.Close()
	os.Exit(ret)
}

func testEchoTCP(t *testing.T, address string, port int) {
	path := "aaaaa"
	res, err := http.Get("http://" + address + ":" + fmt.Sprintf("%d", port) + "/" + path)
	if err != nil {
		t.Fatalf("error connecting to server: %v", err)
	}
	defer res.Body.Close()
	data, err := ioutil.ReadAll(res.Body)
	if err != nil {
		t.Errorf("error reading data: %v %v", err, string(data))
	}
	if string(data) != path {
		t.Errorf("expected: %s, got %s", path, string(data))
	}
}

func testEchoUDP(t *testing.T, address string, port int) {
	data := "abc123"

	conn, err := net.Dial("udp", joinHostPort(address, port))
	if err != nil {
		t.Fatalf("error connecting to server: %v", err)
	}
	if _, err := conn.Write([]byte(data)); err != nil {
		t.Fatalf("error sending to server: %v", err)
	}
	var resp [1024]byte
	n, err := conn.Read(resp[0:])
	if err != nil {
		t.Errorf("error receiving data: %v", err)
	}
	if string(resp[0:n]) != data {
		t.Errorf("expected: %s, got %s", data, string(resp[0:n]))
	}
}

func waitForNumProxyLoops(t *testing.T, p *Proxier, want int32) {
	var got int32
	for i := 0; i < 600; i++ {
		got = atomic.LoadInt32(&p.numProxyLoops)
		if got == want {
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
	t.Errorf("expected %d ProxyLoops running, got %d", want, got)
}

func waitForNumProxyClients(t *testing.T, s *ServiceInfo, want int, timeout time.Duration) {
	var got int
	now := time.Now()
	deadline := now.Add(timeout)
	for time.Now().Before(deadline) {
		s.ActiveClients.Mu.Lock()
		got = len(s.ActiveClients.Clients)
		s.ActiveClients.Mu.Unlock()
		if got == want {
			return
		}
		time.Sleep(500 * time.Millisecond)
	}
	t.Errorf("expected %d ProxyClients live, got %d", want, got)
}

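// Both helpers above poll rather than block: numProxyLoops and ActiveClients
// are mutated asynchronously by the proxy goroutines, so the tests sample the
// counters until the expected value shows up or the retry budget runs out.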
func TestTCPProxy(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	})

	fexec := makeFakeExec()

	p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
	waitForNumProxyLoops(t, p, 1)
}

func TestUDPProxy(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: udpServerPort}},
		}},
	})

	fexec := makeFakeExec()

	p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
	waitForNumProxyLoops(t, p, 1)
}

func TestUDPProxyTimeout(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: udpServerPort}},
		}},
	})

	fexec := makeFakeExec()

	p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	waitForNumProxyLoops(t, p, 1)
	testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
	// Once a client has connected to a UDP service endpoint, the proxy tracks one Conn for it.
	waitForNumProxyClients(t, svcInfo, 1, time.Second)
	// If the conn sees no activity for serviceInfo.timeout since the last read/write, it should be closed.
	waitForNumProxyClients(t, svcInfo, 0, 2*time.Second)
}

func TestMultiPortProxy(t *testing.T) {
	lb := NewLoadBalancerRR()
	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-p"}, Port: "p"}
	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-q"}, Port: "q"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Protocol: "TCP", Port: tcpServerPort}},
		}},
	})
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: serviceQ.Name, Namespace: serviceQ.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "q", Protocol: "UDP", Port: udpServerPort}},
		}},
	})

	fexec := makeFakeExec()

	p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	svcInfoP, err := p.addServiceOnPort(serviceP, "TCP", 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoTCP(t, "127.0.0.1", svcInfoP.proxyPort)
	waitForNumProxyLoops(t, p, 1)

	svcInfoQ, err := p.addServiceOnPort(serviceQ, "UDP", 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoUDP(t, "127.0.0.1", svcInfoQ.proxyPort)
	waitForNumProxyLoops(t, p, 2)
}

func TestMultiPortOnServiceAdd(t *testing.T) {
	lb := NewLoadBalancerRR()
	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "q"}
	serviceX := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "x"}

	fexec := makeFakeExec()

	p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	p.OnServiceAdd(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     80,
			Protocol: "TCP",
		}, {
			Name:     "q",
			Port:     81,
			Protocol: "UDP",
		}}},
	})
	waitForNumProxyLoops(t, p, 2)
	svcInfo, exists := p.getServiceInfo(serviceP)
	if !exists {
		t.Fatalf("can't find serviceInfo for %s", serviceP)
	}
	if svcInfo.portal.ip.String() != "1.2.3.4" || svcInfo.portal.port != 80 || svcInfo.protocol != "TCP" {
		t.Errorf("unexpected serviceInfo for %s: %#v", serviceP, svcInfo)
	}

	svcInfo, exists = p.getServiceInfo(serviceQ)
	if !exists {
		t.Fatalf("can't find serviceInfo for %s", serviceQ)
	}
	if svcInfo.portal.ip.String() != "1.2.3.4" || svcInfo.portal.port != 81 || svcInfo.protocol != "UDP" {
		t.Errorf("unexpected serviceInfo for %s: %#v", serviceQ, svcInfo)
	}

	svcInfo, exists = p.getServiceInfo(serviceX)
	if exists {
		t.Fatalf("found unwanted serviceInfo for %s: %#v", serviceX, svcInfo)
	}
}

// stopProxyByName is a helper that stops the proxy for the named service.
func stopProxyByName(proxier *Proxier, service proxy.ServicePortName) error {
	info, found := proxier.getServiceInfo(service)
	if !found {
		return fmt.Errorf("unknown service: %s", service)
	}
	return proxier.stopProxy(service, info)
}

func TestTCPProxyStop(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	})

	fexec := makeFakeExec()

	p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	if !svcInfo.IsAlive() {
		t.Fatalf("wrong value for IsAlive(): expected true")
	}
	conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort))
	if err != nil {
		t.Fatalf("error connecting to proxy: %v", err)
	}
	conn.Close()
	waitForNumProxyLoops(t, p, 1)

	stopProxyByName(p, service)
	if svcInfo.IsAlive() {
		t.Fatalf("wrong value for IsAlive(): expected false")
	}
	// Wait for the port to really close.
	if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
		t.Fatalf(err.Error())
	}
	waitForNumProxyLoops(t, p, 0)
}

func TestUDPProxyStop(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: udpServerPort}},
		}},
	})

	fexec := makeFakeExec()

	p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	conn, err := net.Dial("udp", joinHostPort("", svcInfo.proxyPort))
	if err != nil {
		t.Fatalf("error connecting to proxy: %v", err)
	}
	conn.Close()
	waitForNumProxyLoops(t, p, 1)

	stopProxyByName(p, service)
	// Wait for the port to really close.
	if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
		t.Fatalf(err.Error())
	}
	waitForNumProxyLoops(t, p, 0)
}

func TestTCPProxyUpdateDelete(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	})

	fexec := makeFakeExec()

	p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort))
	if err != nil {
		t.Fatalf("error connecting to proxy: %v", err)
	}
	conn.Close()
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceDelete(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(svcInfo.proxyPort),
			Protocol: "TCP",
		}}},
	})
	if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
		t.Fatalf(err.Error())
	}
	waitForNumProxyLoops(t, p, 0)
}

func TestUDPProxyUpdateDelete(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: udpServerPort}},
		}},
	})

	fexec := makeFakeExec()

	p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	conn, err := net.Dial("udp", joinHostPort("", svcInfo.proxyPort))
	if err != nil {
		t.Fatalf("error connecting to proxy: %v", err)
	}
	conn.Close()
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceDelete(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(svcInfo.proxyPort),
			Protocol: "UDP",
		}}},
	})
	if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
		t.Fatalf(err.Error())
	}
	waitForNumProxyLoops(t, p, 0)
}

func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	endpoint := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	}
	lb.OnEndpointsAdd(endpoint)

	fexec := makeFakeExec()

	p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort))
	if err != nil {
		t.Fatalf("error connecting to proxy: %v", err)
	}
	conn.Close()
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceDelete(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(svcInfo.proxyPort),
			Protocol: "TCP",
		}}},
	})
	if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
		t.Fatalf(err.Error())
	}
	waitForNumProxyLoops(t, p, 0)

	// We need to add the endpoint again because it was cleaned up during the service delete.
	lb.OnEndpointsAdd(endpoint)
	p.OnServiceAdd(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(svcInfo.proxyPort),
			Protocol: "TCP",
		}}},
	})
	svcInfo, exists := p.getServiceInfo(service)
	if !exists {
		t.Fatalf("can't find serviceInfo for %s", service)
	}
	testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
	waitForNumProxyLoops(t, p, 1)
}

func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	endpoint := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: udpServerPort}},
		}},
	}
	lb.OnEndpointsAdd(endpoint)

	fexec := makeFakeExec()

	p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	conn, err := net.Dial("udp", joinHostPort("", svcInfo.proxyPort))
	if err != nil {
		t.Fatalf("error connecting to proxy: %v", err)
	}
	conn.Close()
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceDelete(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(svcInfo.proxyPort),
			Protocol: "UDP",
		}}},
	})
	if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
		t.Fatalf(err.Error())
	}
	waitForNumProxyLoops(t, p, 0)

	// We need to add the endpoint again because it was cleaned up during the service delete.
	lb.OnEndpointsAdd(endpoint)
	p.OnServiceAdd(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(svcInfo.proxyPort),
			Protocol: "UDP",
		}}},
	})
	svcInfo, exists := p.getServiceInfo(service)
	if !exists {
		t.Fatalf("can't find serviceInfo")
	}
	testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
	waitForNumProxyLoops(t, p, 1)
}

func TestTCPProxyUpdatePort(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	})

	fexec := makeFakeExec()

	p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceAdd(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     99,
			Protocol: "TCP",
		}}},
	})
	// Wait for the socket to actually get free.
	if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
		t.Fatalf(err.Error())
	}
	svcInfo, exists := p.getServiceInfo(service)
	if !exists {
		t.Fatalf("can't find serviceInfo")
	}
	testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
	// This is a bit async, but this should be sufficient.
	time.Sleep(500 * time.Millisecond)
	waitForNumProxyLoops(t, p, 1)
}

func TestUDPProxyUpdatePort(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: udpServerPort}},
		}},
	})

	fexec := makeFakeExec()

	p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceAdd(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     99,
			Protocol: "UDP",
		}}},
	})
	// Wait for the socket to actually get free.
	if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
		t.Fatalf(err.Error())
	}
	svcInfo, exists := p.getServiceInfo(service)
	if !exists {
		t.Fatalf("can't find serviceInfo")
	}
	testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
	waitForNumProxyLoops(t, p, 1)
}

func TestProxyUpdatePublicIPs(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	})

	fexec := makeFakeExec()

	p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceAdd(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{
				Name:     "p",
				Port:     int32(svcInfo.portal.port),
				Protocol: "TCP",
			}},
			ClusterIP:   svcInfo.portal.ip.String(),
			ExternalIPs: []string{"4.3.2.1"},
		},
	})
	// Wait for the socket to actually get free.
	if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
		t.Fatalf(err.Error())
	}
	svcInfo, exists := p.getServiceInfo(service)
	if !exists {
		t.Fatalf("can't find serviceInfo")
	}
	testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
	// This is a bit async, but this should be sufficient.
	time.Sleep(500 * time.Millisecond)
	waitForNumProxyLoops(t, p, 1)
}

func TestProxyUpdatePortal(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	endpoint := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	}
	lb.OnEndpointsAdd(endpoint)

	fexec := makeFakeExec()

	p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
	waitForNumProxyLoops(t, p, 1)

	svcv0 := &api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(svcInfo.proxyPort),
			Protocol: "TCP",
		}}},
	}

	svcv1 := &api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(svcInfo.proxyPort),
			Protocol: "TCP",
		}}},
	}
	p.OnServiceUpdate(svcv0, svcv1)
	_, exists := p.getServiceInfo(service)
	if exists {
		t.Fatalf("service with empty ClusterIP should not be included in the proxy")
	}

	svcv2 := &api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "None", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(svcInfo.proxyPort),
			Protocol: "TCP",
		}}},
	}
	p.OnServiceUpdate(svcv1, svcv2)
	_, exists = p.getServiceInfo(service)
	if exists {
		t.Fatalf("service with 'None' as ClusterIP should not be included in the proxy")
	}

	svcv3 := &api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(svcInfo.proxyPort),
			Protocol: "TCP",
		}}},
	}
	p.OnServiceUpdate(svcv2, svcv3)
	lb.OnEndpointsAdd(endpoint)
	svcInfo, exists = p.getServiceInfo(service)
	if !exists {
		t.Fatalf("service with ClusterIP set not found in the proxy")
	}
	testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
	waitForNumProxyLoops(t, p, 1)
}

func makeFakeExec() *fakeexec.FakeExec {
	fcmd := fakeexec.FakeCmd{
		CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
			func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil },
		},
	}
	return &fakeexec.FakeExec{
		CommandScript: []fakeexec.FakeCommandAction{
			func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
		},
		LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
	}
}

// TODO(justinsb): Add test for nodePort conflict detection, once we have nodePort wired in.
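// The canned output above matches the conntrack CLI's success message;
// presumably the proxier shells out (through the injected exec interface) to
// clear stale UDP conntrack entries, and this fake satisfies that path
// without touching the host.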
302
vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxysocket.go
generated
vendored
Normal file
@ -0,0 +1,302 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package userspace

import (
	"fmt"
	"io"
	"net"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/util/runtime"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/proxy"
)

// ProxySocket is an abstraction over the TCP and UDP sockets that are proxied.
type ProxySocket interface {
	// Addr gets the net.Addr for a ProxySocket.
	Addr() net.Addr
	// Close stops the ProxySocket from accepting incoming connections.
	// Each implementation should comment on the impact of calling Close
	// while sessions are active.
	Close() error
	// ProxyLoop proxies incoming connections for the specified service to the service endpoints.
	ProxyLoop(service proxy.ServicePortName, info *ServiceInfo, loadBalancer LoadBalancer)
	// ListenPort returns the host port that the ProxySocket is listening on.
	ListenPort() int
}

func newProxySocket(protocol api.Protocol, ip net.IP, port int) (ProxySocket, error) {
	host := ""
	if ip != nil {
		host = ip.String()
	}

	switch strings.ToUpper(string(protocol)) {
	case "TCP":
		listener, err := net.Listen("tcp", net.JoinHostPort(host, strconv.Itoa(port)))
		if err != nil {
			return nil, err
		}
		return &tcpProxySocket{Listener: listener, port: port}, nil
	case "UDP":
		addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(host, strconv.Itoa(port)))
		if err != nil {
			return nil, err
		}
		conn, err := net.ListenUDP("udp", addr)
		if err != nil {
			return nil, err
		}
		return &udpProxySocket{UDPConn: conn, port: port}, nil
	}
	return nil, fmt.Errorf("unknown protocol %q", protocol)
}

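// Illustrative use (hypothetical values; the real caller is the Proxier's
// addServiceOnPort, as the tests above show). Note the ToUpper makes the
// protocol match case-insensitive:
//
//	sock, err := newProxySocket("TCP", net.ParseIP("127.0.0.1"), 10080)
//	if err != nil {
//		// handle bind failure
//	}
//	go sock.ProxyLoop(serviceName, info, loadBalancer)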
// EndpointDialTimeouts is the ladder of timeouts we try when dialing a backend endpoint.
var EndpointDialTimeouts = []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 2 * time.Second}

// tcpProxySocket implements ProxySocket. Close() is implemented by net.Listener. When Close() is called,
// no new connections are allowed but existing connections are left untouched.
type tcpProxySocket struct {
	net.Listener
	port int
}

func (tcp *tcpProxySocket) ListenPort() int {
	return tcp.port
}

// TryConnectEndpoints attempts to connect to the next available endpoint for the given service, cycling
// through until it is able to successfully connect, or it has tried with all timeouts in EndpointDialTimeouts.
func TryConnectEndpoints(service proxy.ServicePortName, srcAddr net.Addr, protocol string, loadBalancer LoadBalancer) (out net.Conn, err error) {
	sessionAffinityReset := false
	for _, dialTimeout := range EndpointDialTimeouts {
		endpoint, err := loadBalancer.NextEndpoint(service, srcAddr, sessionAffinityReset)
		if err != nil {
			glog.Errorf("Couldn't find an endpoint for %s: %v", service, err)
			return nil, err
		}
		glog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint)
		// TODO: This could spin up a new goroutine to make the outbound connection,
		// and keep accepting inbound traffic.
		outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout)
		if err != nil {
			if isTooManyFDsError(err) {
				panic("Dial failed: " + err.Error())
			}
			glog.Errorf("Dial failed: %v", err)
			sessionAffinityReset = true
			continue
		}
		return outConn, nil
	}
	return nil, fmt.Errorf("failed to connect to an endpoint.")
}

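// The retry ladder above asks the load balancer for a (possibly different)
// endpoint on every pass and widens the dial timeout each time (250ms, 500ms,
// 1s, 2s). Setting sessionAffinityReset after a failed dial keeps a dead
// backend from recapturing the same client through its affinity record.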
func (tcp *tcpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *ServiceInfo, loadBalancer LoadBalancer) {
	for {
		if !myInfo.IsAlive() {
			// The service port was closed or replaced.
			return
		}
		// Block until a connection is made.
		inConn, err := tcp.Accept()
		if err != nil {
			if isTooManyFDsError(err) {
				panic("Accept failed: " + err.Error())
			}

			if isClosedError(err) {
				return
			}
			if !myInfo.IsAlive() {
				// The service port was just closed, so the accept failure is expected.
				return
			}
			glog.Errorf("Accept failed: %v", err)
			continue
		}
		glog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
		outConn, err := TryConnectEndpoints(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", loadBalancer)
		if err != nil {
			glog.Errorf("Failed to connect to balancer: %v", err)
			inConn.Close()
			continue
		}
		// Spin up an async copy loop.
		go ProxyTCP(inConn.(*net.TCPConn), outConn.(*net.TCPConn))
	}
}

// ProxyTCP proxies data bi-directionally between in and out.
func ProxyTCP(in, out *net.TCPConn) {
	var wg sync.WaitGroup
	wg.Add(2)
	glog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
		in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
	go copyBytes("from backend", in, out, &wg)
	go copyBytes("to backend", out, in, &wg)
	wg.Wait()
}

func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) {
	defer wg.Done()
	glog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
	n, err := io.Copy(dest, src)
	if err != nil {
		if !isClosedError(err) {
			glog.Errorf("I/O error: %v", err)
		}
	}
	glog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
	dest.Close()
	src.Close()
}

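// Note the teardown semantics: whichever copy direction finishes first closes
// both sockets, which unblocks the other io.Copy, so a half-closed session is
// torn down completely rather than lingering.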
// udpProxySocket implements ProxySocket. Close() is implemented by net.UDPConn. When Close() is called,
// no new connections are allowed and existing connections are broken.
// TODO: We could lame-duck this ourselves, if it becomes important.
type udpProxySocket struct {
	*net.UDPConn
	port int
}

func (udp *udpProxySocket) ListenPort() int {
	return udp.port
}

func (udp *udpProxySocket) Addr() net.Addr {
	return udp.LocalAddr()
}

// ClientCache holds all the known UDP clients that have not timed out.
type ClientCache struct {
	Mu      sync.Mutex
	Clients map[string]net.Conn // addr string -> connection
}

func newClientCache() *ClientCache {
	return &ClientCache{Clients: map[string]net.Conn{}}
}

func (udp *udpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *ServiceInfo, loadBalancer LoadBalancer) {
	var buffer [4096]byte // 4KiB should be enough for most whole packets.
	for {
		if !myInfo.IsAlive() {
			// The service port was closed or replaced.
			break
		}

		// Block until data arrives.
		// TODO: Accumulate a histogram of n or something, to fine tune the buffer size.
		n, cliAddr, err := udp.ReadFrom(buffer[0:])
		if err != nil {
			if e, ok := err.(net.Error); ok {
				if e.Temporary() {
					glog.V(1).Infof("ReadFrom had a temporary failure: %v", err)
					continue
				}
			}
			glog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err)
			break
		}
		// If this is a client we know already, reuse the connection and goroutine.
		svrConn, err := udp.getBackendConn(myInfo.ActiveClients, cliAddr, loadBalancer, service, myInfo.Timeout)
		if err != nil {
			continue
		}
		// TODO: It would be nice to let the goroutine handle this write, but we don't
		// really want to copy the buffer. We could do a pool of buffers or something.
		_, err = svrConn.Write(buffer[0:n])
		if err != nil {
			if !logTimeout(err) {
				glog.Errorf("Write failed: %v", err)
				// TODO: Maybe tear down the goroutine for this client/server pair?
			}
			continue
		}
		err = svrConn.SetDeadline(time.Now().Add(myInfo.Timeout))
		if err != nil {
			glog.Errorf("SetDeadline failed: %v", err)
			continue
		}
	}
}

func (udp *udpProxySocket) getBackendConn(activeClients *ClientCache, cliAddr net.Addr, loadBalancer LoadBalancer, service proxy.ServicePortName, timeout time.Duration) (net.Conn, error) {
	activeClients.Mu.Lock()
	defer activeClients.Mu.Unlock()

	svrConn, found := activeClients.Clients[cliAddr.String()]
	if !found {
		// TODO: This could spin up a new goroutine to make the outbound connection,
		// and keep accepting inbound traffic.
		glog.V(3).Infof("New UDP connection from %s", cliAddr)
		var err error
		svrConn, err = TryConnectEndpoints(service, cliAddr, "udp", loadBalancer)
		if err != nil {
			return nil, err
		}
		if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil {
			glog.Errorf("SetDeadline failed: %v", err)
			return nil, err
		}
		activeClients.Clients[cliAddr.String()] = svrConn
		go func(cliAddr net.Addr, svrConn net.Conn, activeClients *ClientCache, timeout time.Duration) {
			defer runtime.HandleCrash()
			udp.proxyClient(cliAddr, svrConn, activeClients, timeout)
		}(cliAddr, svrConn, activeClients, timeout)
	}
	return svrConn, nil
}

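// One backend connection and one proxyClient goroutine exist per client
// address. The goroutine deletes its cache entry when the backend read loop
// ends (see proxyClient below), so an idle client is simply re-dialed on its
// next packet.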
// proxyClient is expected to be called as a goroutine.
// TODO: Track and log bytes copied, like TCP.
func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activeClients *ClientCache, timeout time.Duration) {
	defer svrConn.Close()
	var buffer [4096]byte
	for {
		n, err := svrConn.Read(buffer[0:])
		if err != nil {
			if !logTimeout(err) {
				glog.Errorf("Read failed: %v", err)
			}
			break
		}
		err = svrConn.SetDeadline(time.Now().Add(timeout))
		if err != nil {
			glog.Errorf("SetDeadline failed: %v", err)
			break
		}
		n, err = udp.WriteTo(buffer[0:n], cliAddr)
		if err != nil {
			if !logTimeout(err) {
				glog.Errorf("WriteTo failed: %v", err)
			}
			break
		}
	}
	activeClients.Mu.Lock()
	delete(activeClients.Clients, cliAddr.String())
	activeClients.Mu.Unlock()
}
25
vendor/k8s.io/kubernetes/pkg/proxy/userspace/rlimit.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
// +build !windows

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package userspace

import "golang.org/x/sys/unix"

// setRLimit raises both the soft and hard open-file limits to the given value.
func setRLimit(limit uint64) error {
	return unix.Setrlimit(unix.RLIMIT_NOFILE, &unix.Rlimit{Max: limit, Cur: limit})
}
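// Raising RLIMIT_NOFILE matters for this proxy because each proxied TCP
// session holds roughly two file descriptors (client side plus backend side);
// compare the isTooManyFDsError panics in proxysocket.go for the failure mode
// when the limit is hit.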
23
vendor/k8s.io/kubernetes/pkg/proxy/userspace/rlimit_windows.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
// +build windows

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package userspace

// setRLimit is a no-op on Windows, which does not support setrlimit.
func setRLimit(limit uint64) error {
	return nil
}
386
vendor/k8s.io/kubernetes/pkg/proxy/userspace/roundrobin.go
generated
vendored
Normal file
@ -0,0 +1,386 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package userspace

import (
	"errors"
	"fmt"
	"net"
	"reflect"
	"strconv"
	"sync"
	"time"

	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/types"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/proxy"
	"k8s.io/kubernetes/pkg/util/slice"
)

var (
	ErrMissingServiceEntry = errors.New("missing service entry")
	ErrMissingEndpoints    = errors.New("missing endpoints")
)

type affinityState struct {
	clientIP string
	//clientProtocol api.Protocol //not yet used
	//sessionCookie  string       //not yet used
	endpoint string
	lastUsed time.Time
}

type affinityPolicy struct {
	affinityType api.ServiceAffinity
	affinityMap  map[string]*affinityState // map client IP -> affinity info
	ttlSeconds   int
}

// LoadBalancerRR is a round-robin load balancer.
type LoadBalancerRR struct {
	lock     sync.RWMutex
	services map[proxy.ServicePortName]*balancerState
}

// Ensure this implements LoadBalancer.
var _ LoadBalancer = &LoadBalancerRR{}

type balancerState struct {
	endpoints []string // a list of "ip:port" style strings
	index     int      // current index into endpoints
	affinity  affinityPolicy
}

func newAffinityPolicy(affinityType api.ServiceAffinity, ttlSeconds int) *affinityPolicy {
	return &affinityPolicy{
		affinityType: affinityType,
		affinityMap:  make(map[string]*affinityState),
		ttlSeconds:   ttlSeconds,
	}
}

// NewLoadBalancerRR returns a new LoadBalancerRR.
func NewLoadBalancerRR() *LoadBalancerRR {
	return &LoadBalancerRR{
		services: map[proxy.ServicePortName]*balancerState{},
	}
}

func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlSeconds int) error {
	glog.V(4).Infof("LoadBalancerRR NewService %q", svcPort)
	lb.lock.Lock()
	defer lb.lock.Unlock()
	lb.newServiceInternal(svcPort, affinityType, ttlSeconds)
	return nil
}

// newServiceInternal assumes that lb.lock is already held.
func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlSeconds int) *balancerState {
	if ttlSeconds == 0 {
		ttlSeconds = int(api.DefaultClientIPServiceAffinitySeconds) // default to 3 hours if not specified. Should 0 be unlimited instead?
	}

	if _, exists := lb.services[svcPort]; !exists {
		lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlSeconds)}
		glog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort)
	} else if affinityType != "" {
		lb.services[svcPort].affinity.affinityType = affinityType
	}
	return lb.services[svcPort]
}

func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
	glog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort)
	lb.lock.Lock()
	defer lb.lock.Unlock()
	delete(lb.services, svcPort)
}

// isSessionAffinity returns true if this service is using some form of session affinity.
func isSessionAffinity(affinity *affinityPolicy) bool {
	// Should never be the empty string, but checking for it to be safe.
	if affinity.affinityType == "" || affinity.affinityType == api.ServiceAffinityNone {
		return false
	}
	return true
}

// ServiceHasEndpoints checks whether a service entry has endpoints.
func (lb *LoadBalancerRR) ServiceHasEndpoints(svcPort proxy.ServicePortName) bool {
	lb.lock.Lock()
	defer lb.lock.Unlock()
	state, exists := lb.services[svcPort]
	// TODO: while nothing ever assigns nil to the map, *some* of the code using the map
	// checks for it. The code should all follow the same convention.
	return exists && state != nil && len(state.endpoints) > 0
}

// NextEndpoint returns a service endpoint.
|
||||
// The service endpoint is chosen using the round-robin algorithm.
|
||||
func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error) {
|
||||
// Coarse locking is simple. We can get more fine-grained if/when we
|
||||
// can prove it matters.
|
||||
lb.lock.Lock()
|
||||
defer lb.lock.Unlock()
|
||||
|
||||
state, exists := lb.services[svcPort]
|
||||
if !exists || state == nil {
|
||||
return "", ErrMissingServiceEntry
|
||||
}
|
||||
if len(state.endpoints) == 0 {
|
||||
return "", ErrMissingEndpoints
|
||||
}
|
||||
glog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints)
|
||||
|
||||
sessionAffinityEnabled := isSessionAffinity(&state.affinity)
|
||||
|
||||
var ipaddr string
|
||||
if sessionAffinityEnabled {
|
||||
// Caution: don't shadow ipaddr
|
||||
var err error
|
||||
ipaddr, _, err = net.SplitHostPort(srcAddr.String())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("malformed source address %q: %v", srcAddr.String(), err)
|
||||
}
|
||||
if !sessionAffinityReset {
|
||||
sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
|
||||
if exists && int(time.Now().Sub(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
|
||||
// Affinity wins.
|
||||
endpoint := sessionAffinity.endpoint
|
||||
sessionAffinity.lastUsed = time.Now()
|
||||
glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint)
|
||||
return endpoint, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
// Take the next endpoint.
|
||||
endpoint := state.endpoints[state.index]
|
||||
state.index = (state.index + 1) % len(state.endpoints)
|
||||
|
||||
if sessionAffinityEnabled {
|
||||
var affinity *affinityState
|
||||
affinity = state.affinity.affinityMap[ipaddr]
|
||||
if affinity == nil {
|
||||
affinity = new(affinityState) //&affinityState{ipaddr, "TCP", "", endpoint, time.Now()}
|
||||
state.affinity.affinityMap[ipaddr] = affinity
|
||||
}
|
||||
affinity.lastUsed = time.Now()
|
||||
affinity.endpoint = endpoint
|
||||
affinity.clientIP = ipaddr
|
||||
glog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr])
|
||||
}
|
||||
|
||||
return endpoint, nil
|
||||
}
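
// A minimal usage sketch (illustrative only, not part of the vendored source;
// svcPort, endpoints, and clientAddr are assumed to be supplied by the caller,
// and the endpoint strings are hypothetical):
//
//	lb := NewLoadBalancerRR()
//	lb.NewService(svcPort, api.ServiceAffinityNone, 0)
//	lb.OnEndpointsAdd(endpoints) // say this yields endpoints ["10.0.0.1:80", "10.0.0.2:80"]
//	ep, _ := lb.NextEndpoint(svcPort, clientAddr, false)
//	// With affinity off, successive calls rotate: "10.0.0.1:80", "10.0.0.2:80", ...
//	// With api.ServiceAffinityClientIP, calls from the same client IP keep returning
//	// the same endpoint until the TTL lapses or sessionAffinityReset is true.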

type hostPortPair struct {
	host string
	port int
}

func isValidEndpoint(hpp *hostPortPair) bool {
	return hpp.host != "" && hpp.port > 0
}

func flattenValidEndpoints(endpoints []hostPortPair) []string {
	// Convert Endpoint objects into strings for easier use later. Ignore
	// the protocol field - we'll get that from the Service objects.
	var result []string
	for i := range endpoints {
		hpp := &endpoints[i]
		if isValidEndpoint(hpp) {
			result = append(result, net.JoinHostPort(hpp.host, strconv.Itoa(hpp.port)))
		}
	}
	return result
}
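
// For illustration (hypothetical values): pairs that fail isValidEndpoint are
// dropped and the rest are joined with net.JoinHostPort, so
//
//	flattenValidEndpoints([]hostPortPair{{"10.1.2.3", 80}, {"", 80}, {"10.1.2.4", 0}})
//
// returns []string{"10.1.2.3:80"}. IPv6 hosts come back bracketed, e.g. "[fd00::1]:80".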

// Remove any session affinity records associated with a particular endpoint (for example when a pod goes down).
func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) {
	for _, affinity := range state.affinity.affinityMap {
		if affinity.endpoint == endpoint {
			glog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort)
			delete(state.affinity.affinityMap, affinity.clientIP)
		}
	}
}

// Loop through the valid endpoints and then the endpoints associated with the Load Balancer.
// Then remove any session affinity records that are not in both lists.
// This assumes the lb.lock is held.
func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEndpoints []string) {
	allEndpoints := map[string]int{}
	for _, newEndpoint := range newEndpoints {
		allEndpoints[newEndpoint] = 1
	}
	state, exists := lb.services[svcPort]
	if !exists {
		return
	}
	for _, existingEndpoint := range state.endpoints {
		allEndpoints[existingEndpoint] = allEndpoints[existingEndpoint] + 1
	}
	for mKey, mVal := range allEndpoints {
		if mVal == 1 {
			glog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort)
			removeSessionAffinityByEndpoint(state, svcPort, mKey)
		}
	}
}
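
// To make the counting trick above concrete (hypothetical endpoints): with
// current endpoints ["a:1", "b:1"] and new endpoints ["b:1", "c:1"], the tallies
// are a:1, b:2, c:1. Endpoints tallying 1 appear in only one of the two lists,
// so affinity records pointing at "a:1" are dropped; "c:1" has no records yet,
// so its removal pass is a no-op.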

// buildPortsToEndpointsMap builds a map of portname -> all ip:ports for that
// portname. Explode Endpoints.Subsets[*] into this structure.
func buildPortsToEndpointsMap(endpoints *api.Endpoints) map[string][]hostPortPair {
	portsToEndpoints := map[string][]hostPortPair{}
	for i := range endpoints.Subsets {
		ss := &endpoints.Subsets[i]
		for i := range ss.Ports {
			port := &ss.Ports[i]
			for i := range ss.Addresses {
				addr := &ss.Addresses[i]
				portsToEndpoints[port.Name] = append(portsToEndpoints[port.Name], hostPortPair{addr.IP, int(port.Port)})
				// Ignore the protocol field - we'll get that from the Service objects.
			}
		}
	}
	return portsToEndpoints
}
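
// As an illustration (hypothetical subset): addresses {10.0.0.1, 10.0.0.2} with
// named ports {p: 80, q: 443} expand to
//
//	map[string][]hostPortPair{
//		"p": {{"10.0.0.1", 80}, {"10.0.0.2", 80}},
//		"q": {{"10.0.0.1", 443}, {"10.0.0.2", 443}},
//	}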

func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *api.Endpoints) {
	portsToEndpoints := buildPortsToEndpointsMap(endpoints)

	lb.lock.Lock()
	defer lb.lock.Unlock()

	for portname := range portsToEndpoints {
		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
		newEndpoints := flattenValidEndpoints(portsToEndpoints[portname])
		state, exists := lb.services[svcPort]

		if !exists || state == nil || len(newEndpoints) > 0 {
			glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
			lb.updateAffinityMap(svcPort, newEndpoints)
			// OnEndpointsAdd can be called without NewService being called externally.
			// To be safe we will call it here. A new service will only be created
			// if one does not already exist. The affinity will be updated
			// later, once NewService is called.
			state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
			state.endpoints = slice.ShuffleStrings(newEndpoints)

			// Reset the round-robin index.
			state.index = 0
		}
	}
}

func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
	portsToEndpoints := buildPortsToEndpointsMap(endpoints)
	oldPortsToEndpoints := buildPortsToEndpointsMap(oldEndpoints)
	registeredEndpoints := make(map[proxy.ServicePortName]bool)

	lb.lock.Lock()
	defer lb.lock.Unlock()

	for portname := range portsToEndpoints {
		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
		newEndpoints := flattenValidEndpoints(portsToEndpoints[portname])
		state, exists := lb.services[svcPort]

		curEndpoints := []string{}
		if state != nil {
			curEndpoints = state.endpoints
		}

		if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints) {
			glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
			lb.updateAffinityMap(svcPort, newEndpoints)
			// OnEndpointsUpdate can be called without NewService being called externally.
			// To be safe we will call it here. A new service will only be created
			// if one does not already exist. The affinity will be updated
			// later, once NewService is called.
			state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
			state.endpoints = slice.ShuffleStrings(newEndpoints)

			// Reset the round-robin index.
			state.index = 0
		}
		registeredEndpoints[svcPort] = true
	}

	// Now remove all endpoints missing from the update.
	for portname := range oldPortsToEndpoints {
		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: oldEndpoints.Namespace, Name: oldEndpoints.Name}, Port: portname}
		if _, exists := registeredEndpoints[svcPort]; !exists {
			lb.resetService(svcPort)
		}
	}
}

func (lb *LoadBalancerRR) resetService(svcPort proxy.ServicePortName) {
	// If the service is still around, reset but don't delete.
	if state, ok := lb.services[svcPort]; ok {
		if len(state.endpoints) > 0 {
			glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
			state.endpoints = []string{}
		}
		state.index = 0
		state.affinity.affinityMap = map[string]*affinityState{}
	}
}

func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *api.Endpoints) {
	portsToEndpoints := buildPortsToEndpointsMap(endpoints)

	lb.lock.Lock()
	defer lb.lock.Unlock()

	for portname := range portsToEndpoints {
		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
		lb.resetService(svcPort)
	}
}

func (lb *LoadBalancerRR) OnEndpointsSynced() {
}

// Tests whether two slices are equivalent. This sorts both slices in-place.
func slicesEquiv(lhs, rhs []string) bool {
	if len(lhs) != len(rhs) {
		return false
	}
	if reflect.DeepEqual(slice.SortStrings(lhs), slice.SortStrings(rhs)) {
		return true
	}
	return false
}

func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortName) {
	lb.lock.Lock()
	defer lb.lock.Unlock()

	state, exists := lb.services[svcPort]
	if !exists {
		return
	}
	for ip, affinity := range state.affinity.affinityMap {
		if int(time.Now().Sub(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
			glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
			delete(state.affinity.affinityMap, ip)
		}
	}
}
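
// A sketch of how a caller might drive the TTL cleanup (the interval and
// svcPort here are assumptions, not something this file mandates):
//
//	go func() {
//		for range time.Tick(30 * time.Second) {
//			lb.CleanupStaleStickySessions(svcPort)
//		}
//	}()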

717
vendor/k8s.io/kubernetes/pkg/proxy/userspace/roundrobin_test.go
generated
vendored
Normal file
@ -0,0 +1,717 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package userspace

import (
	"net"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/proxy"
)

func TestValidateWorks(t *testing.T) {
	if isValidEndpoint(&hostPortPair{}) {
		t.Errorf("Didn't fail for empty set")
	}
	if isValidEndpoint(&hostPortPair{host: "foobar"}) {
		t.Errorf("Didn't fail with invalid port")
	}
	if isValidEndpoint(&hostPortPair{host: "foobar", port: -1}) {
		t.Errorf("Didn't fail with a negative port")
	}
	if !isValidEndpoint(&hostPortPair{host: "foobar", port: 8080}) {
		t.Errorf("Failed a valid config.")
	}
}

func TestFilterWorks(t *testing.T) {
	endpoints := []hostPortPair{
		{host: "foobar", port: 1},
		{host: "foobar", port: 2},
		{host: "foobar", port: -1},
		{host: "foobar", port: 3},
		{host: "foobar", port: -2},
	}
	filtered := flattenValidEndpoints(endpoints)

	if len(filtered) != 3 {
		t.Errorf("Failed to filter to the correct size")
	}
	if filtered[0] != "foobar:1" {
		t.Errorf("Index zero is not foobar:1")
	}
	if filtered[1] != "foobar:2" {
		t.Errorf("Index one is not foobar:2")
	}
	if filtered[2] != "foobar:3" {
		t.Errorf("Index two is not foobar:3")
	}
}

func TestLoadBalanceFailsWithNoEndpoints(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "does-not-exist"}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil {
		t.Errorf("Didn't fail with non-existent service")
	}
	if len(endpoint) != 0 {
		t.Errorf("Got an endpoint")
	}
}

func expectEndpoint(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
	endpoint, err := loadBalancer.NextEndpoint(service, netaddr, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
	}
	if endpoint != expected {
		t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
	}
}

func expectEndpointWithSessionAffinityReset(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
	endpoint, err := loadBalancer.NextEndpoint(service, netaddr, true)
	if err != nil {
		t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
	}
	if endpoint != expected {
		t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
	}
}

func TestLoadBalanceWorksWithSingleEndpoint(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpoints := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: 40}},
		}},
	}
	loadBalancer.OnEndpointsAdd(endpoints)
	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
}

func stringsInSlice(haystack []string, needles ...string) bool {
	for _, needle := range needles {
		found := false
		for i := range haystack {
			if haystack[i] == needle {
				found = true
				break
			}
		}
		if found == false {
			return false
		}
	}
	return true
}

func TestLoadBalanceWorksWithMultipleEndpoints(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpoints := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "endpoint"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}},
		}},
	}
	loadBalancer.OnEndpointsAdd(endpoints)

	shuffledEndpoints := loadBalancer.services[service].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint:1", "endpoint:2", "endpoint:3") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil)
}

func TestLoadBalanceWorksWithMultipleEndpointsMultiplePorts(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
	endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpoints := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}},
			},
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints)

	shuffledEndpoints := loadBalancer.services[serviceP].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:1", "endpoint3:3") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)

	shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint1:2", "endpoint2:2", "endpoint3:4") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
}

func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
	endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpointsv1 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}},
			},
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint2"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}},
			},
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpointsv1)

	shuffledEndpoints := loadBalancer.services[serviceP].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:2", "endpoint3:3") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)

	shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint1:10", "endpoint2:20", "endpoint3:30") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)

	// Then update the configuration with one fewer endpoints, make sure
	// we start in the beginning again
	endpointsv2 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint4"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}},
			},
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint5"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}},
			},
		},
	}
	loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)

	shuffledEndpoints = loadBalancer.services[serviceP].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint4:4", "endpoint5:5") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)

	shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint4:40", "endpoint5:50") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)

	// Clear endpoints
	endpointsv3 := &api.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil}
	loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)

	endpoint, err = loadBalancer.NextEndpoint(serviceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
}

func TestLoadBalanceWorksWithServiceRemoval(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	fooServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	barServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: "p"}
	endpoint, err := loadBalancer.NextEndpoint(fooServiceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpoints1 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: fooServiceP.Name, Namespace: fooServiceP.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 123}},
			},
		},
	}
	endpoints2 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: barServiceP.Name, Namespace: barServiceP.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 456}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints1)
	loadBalancer.OnEndpointsAdd(endpoints2)
	shuffledFooEndpoints := loadBalancer.services[fooServiceP].endpoints
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil)

	shuffledBarEndpoints := loadBalancer.services[barServiceP].endpoints
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)

	// Then update the configuration by removing foo
	loadBalancer.OnEndpointsDelete(endpoints1)
	endpoint, err = loadBalancer.NextEndpoint(fooServiceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	// but bar is still there, and we continue RR from where we left off.
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
}

func TestStickyLoadBalanceWorksWithNewServiceCalledFirst(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	// Call NewService() before OnEndpointsUpdate()
	loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
	endpoints := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{
			{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
			{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
			{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints)

	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}

	ep1, err := loadBalancer.NextEndpoint(service, client1, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep1, client1)

	ep2, err := loadBalancer.NextEndpoint(service, client2, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep2, client2)

	ep3, err := loadBalancer.NextEndpoint(service, client3, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep3, client3)

	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
}

func TestStickyLoadBalanceWorksWithNewServiceCalledSecond(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	// Call OnEndpointsUpdate() before NewService()
	endpoints := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{
			{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
			{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints)
	loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))

	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}

	ep1, err := loadBalancer.NextEndpoint(service, client1, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep1, client1)

	ep2, err := loadBalancer.NextEndpoint(service, client2, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep2, client2)

	ep3, err := loadBalancer.NextEndpoint(service, client3, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep3, client3)

	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
}

func TestStickyLoadBalanaceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
	client4 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 4), Port: 0}
	client5 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 5), Port: 0}
	client6 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 6), Port: 0}
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
	endpointsv1 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint"}},
				Ports:     []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpointsv1)
	shuffledEndpoints := loadBalancer.services[service].endpoints
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	client1Endpoint := shuffledEndpoints[0]
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	client2Endpoint := shuffledEndpoints[1]
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
	client3Endpoint := shuffledEndpoints[2]

	endpointsv2 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint"}},
				Ports:     []api.EndpointPort{{Port: 1}, {Port: 2}},
			},
		},
	}
	loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)
	shuffledEndpoints = loadBalancer.services[service].endpoints
	if client1Endpoint == "endpoint:3" {
		client1Endpoint = shuffledEndpoints[0]
	} else if client2Endpoint == "endpoint:3" {
		client2Endpoint = shuffledEndpoints[0]
	} else if client3Endpoint == "endpoint:3" {
		client3Endpoint = shuffledEndpoints[0]
	}
	expectEndpoint(t, loadBalancer, service, client1Endpoint, client1)
	expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
	expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)

	endpointsv3 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint"}},
				Ports:     []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}},
			},
		},
	}
	loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
	shuffledEndpoints = loadBalancer.services[service].endpoints
	expectEndpoint(t, loadBalancer, service, client1Endpoint, client1)
	expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
	expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client4)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client5)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client6)
}

func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
	endpointsv1 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint"}},
				Ports:     []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpointsv1)
	shuffledEndpoints := loadBalancer.services[service].endpoints
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	// Then update the configuration with one fewer endpoints, make sure
	// we start in the beginning again
	endpointsv2 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint"}},
				Ports:     []api.EndpointPort{{Port: 4}, {Port: 5}},
			},
		},
	}
	loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)
	shuffledEndpoints = loadBalancer.services[service].endpoints
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)

	// Clear endpoints
	endpointsv3 := &api.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil}
	loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)

	endpoint, err = loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
}

func TestStickyLoadBalanceWorksWithServiceRemoval(t *testing.T) {
	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
	loadBalancer := NewLoadBalancerRR()
	fooService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(fooService, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	loadBalancer.NewService(fooService, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
	endpoints1 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: fooService.Name, Namespace: fooService.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint"}},
				Ports:     []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
			},
		},
	}
	barService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: ""}
	loadBalancer.NewService(barService, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
	endpoints2 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: barService.Name, Namespace: barService.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint"}},
				Ports:     []api.EndpointPort{{Port: 4}, {Port: 5}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints1)
	loadBalancer.OnEndpointsAdd(endpoints2)

	shuffledFooEndpoints := loadBalancer.services[fooService].endpoints
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)

	shuffledBarEndpoints := loadBalancer.services[barService].endpoints
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)

	// Then update the configuration by removing foo
	loadBalancer.OnEndpointsDelete(endpoints1)
	endpoint, err = loadBalancer.NextEndpoint(fooService, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	// but bar is still there, and we continue RR from where we left off.
	shuffledBarEndpoints = loadBalancer.services[barService].endpoints
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
}

func TestStickyLoadBalanceWorksWithEndpointFails(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	// Call NewService() before OnEndpointsUpdate()
	loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
	endpoints := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{
			{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
			{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
			{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints)

	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}

	ep1, err := loadBalancer.NextEndpoint(service, client1, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}

	ep2, err := loadBalancer.NextEndpoint(service, client2, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}

	ep3, err := loadBalancer.NextEndpoint(service, client3, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}

	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client1)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client1)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)

	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
}

54
vendor/k8s.io/kubernetes/pkg/proxy/userspace/udp_server.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package userspace

import (
	"fmt"
	"net"
)

// udpEchoServer is a simple echo server in UDP, intended for testing the proxy.
type udpEchoServer struct {
	net.PacketConn
}

func (r *udpEchoServer) Loop() {
	var buffer [4096]byte
	for {
		n, cliAddr, err := r.ReadFrom(buffer[0:])
		if err != nil {
			fmt.Printf("ReadFrom failed: %v\n", err)
			continue
		}
		r.WriteTo(buffer[0:n], cliAddr)
	}
}

func newUDPEchoServer() (*udpEchoServer, error) {
	packetconn, err := net.ListenPacket("udp", ":0")
	if err != nil {
		return nil, err
	}
	return &udpEchoServer{packetconn}, nil
}

/*
func main() {
	r,_ := newUDPEchoServer()
	r.Loop()
}
*/

53
vendor/k8s.io/kubernetes/pkg/proxy/util/BUILD
generated
vendored
Normal file
@ -0,0 +1,53 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "conntrack.go",
        "endpoints.go",
        "port.go",
        "utils.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/proxy/util",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/apis/core:go_default_library",
        "//pkg/apis/core/helper:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "conntrack_test.go",
        "endpoints_test.go",
        "port_test.go",
        "utils_test.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/proxy/util",
    library = ":go_default_library",
    deps = [
        "//pkg/apis/core:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
        "//vendor/k8s.io/utils/exec/testing:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

105
vendor/k8s.io/kubernetes/pkg/proxy/util/conntrack.go
generated
vendored
Normal file
@ -0,0 +1,105 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"fmt"
	"net"
	"strconv"
	"strings"

	"k8s.io/utils/exec"
)

// Utilities for dealing with conntrack

const NoConnectionToDelete = "0 flow entries have been deleted"

func IsIPv6(netIP net.IP) bool {
	return netIP != nil && netIP.To4() == nil
}

func IsIPv6String(ip string) bool {
	netIP := net.ParseIP(ip)
	return IsIPv6(netIP)
}

func parametersWithFamily(isIPv6 bool, parameters ...string) []string {
	if isIPv6 {
		parameters = append(parameters, "-f", "ipv6")
	}
	return parameters
}
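
// For example (hypothetical invocation): parametersWithFamily(true, "-D", "-p", "udp")
// returns []string{"-D", "-p", "udp", "-f", "ipv6"}; with isIPv6 false the
// parameters come back unchanged.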

// ClearUDPConntrackForIP uses the conntrack tool to delete the conntrack entries
// for the UDP connections specified by the given service IP
func ClearUDPConntrackForIP(execer exec.Interface, ip string) error {
	parameters := parametersWithFamily(IsIPv6String(ip), "-D", "--orig-dst", ip, "-p", "udp")
	err := ExecConntrackTool(execer, parameters...)
	if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) {
		// TODO: Better handling for deletion failure. When a failure occurs, stale udp connections may not get flushed.
		// These stale udp connections will keep blackholing traffic. Making this a best-effort operation for now, since it
		// is expensive to baby-sit all udp connections to kubernetes services.
		return fmt.Errorf("error deleting connection tracking state for UDP service IP: %s, error: %v", ip, err)
	}
	return nil
}

// ExecConntrackTool executes the conntrack tool using the given parameters
func ExecConntrackTool(execer exec.Interface, parameters ...string) error {
	conntrackPath, err := execer.LookPath("conntrack")
	if err != nil {
		return fmt.Errorf("error looking for path of conntrack: %v", err)
	}
	output, err := execer.Command(conntrackPath, parameters...).CombinedOutput()
	if err != nil {
		return fmt.Errorf("conntrack command returned: %q, error message: %s", string(output), err)
	}
	return nil
}

// ClearUDPConntrackForPort uses the conntrack tool to delete the conntrack entries
// for the UDP connections specified by the port.
// When a packet arrives, it will not go through the NAT table again, because it is not "the first" packet.
// The solution is clearing the conntrack. Known issues:
// https://github.com/docker/docker/issues/8795
// https://github.com/kubernetes/kubernetes/issues/31983
func ClearUDPConntrackForPort(execer exec.Interface, port int, isIPv6 bool) error {
	if port <= 0 {
		return fmt.Errorf("Wrong port number. The port number must be greater than zero")
	}
	parameters := parametersWithFamily(isIPv6, "-D", "-p", "udp", "--dport", strconv.Itoa(port))
	err := ExecConntrackTool(execer, parameters...)
	if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) {
		return fmt.Errorf("error deleting conntrack entries for UDP port: %d, error: %v", port, err)
	}
	return nil
}

// ClearUDPConntrackForPeers uses the conntrack tool to delete the conntrack entries
// for the UDP connections specified by the {origin, dest} IP pair.
func ClearUDPConntrackForPeers(execer exec.Interface, origin, dest string) error {
	parameters := parametersWithFamily(IsIPv6String(origin), "-D", "--orig-dst", origin, "--dst-nat", dest, "-p", "udp")
	err := ExecConntrackTool(execer, parameters...)
	if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) {
		// TODO: Better handling for deletion failure. When a failure occurs, stale udp connections may not get flushed.
		// These stale udp connections will keep blackholing traffic. Making this a best-effort operation for now, since it
		// is expensive to baby-sit all udp connections to kubernetes services.
		return fmt.Errorf("error deleting conntrack entries for UDP peer {%s, %s}, error: %v", origin, dest, err)
	}
	return nil
}
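
// Taken together, these helpers shell out to the conntrack CLI; the resulting
// invocations look roughly like the following (IPs and port are hypothetical):
//
//	ClearUDPConntrackForIP(execer, "10.0.0.10")  // conntrack -D --orig-dst 10.0.0.10 -p udp
//	ClearUDPConntrackForPort(execer, 53, false)  // conntrack -D -p udp --dport 53
//	ClearUDPConntrackForPeers(execer, "10.0.0.10", "10.1.0.5")
//	// conntrack -D --orig-dst 10.0.0.10 --dst-nat 10.1.0.5 -p udp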

331
vendor/k8s.io/kubernetes/pkg/proxy/util/conntrack_test.go
generated
vendored
Normal file
@ -0,0 +1,331 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/utils/exec"
|
||||
fakeexec "k8s.io/utils/exec/testing"
|
||||
)
|
||||
|
||||
func familyParamStr(isIPv6 bool) string {
|
||||
if isIPv6 {
|
||||
return " -f ipv6"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func TestExecConntrackTool(t *testing.T) {
|
||||
fcmd := fakeexec.FakeCmd{
|
||||
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
|
||||
func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil },
|
||||
func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil },
|
||||
func() ([]byte, error) {
|
||||
return []byte(""), fmt.Errorf("conntrack v1.4.2 (conntrack-tools): 0 flow entries have been deleted")
|
||||
},
|
||||
},
|
||||
}
|
||||
fexec := fakeexec.FakeExec{
|
||||
CommandScript: []fakeexec.FakeCommandAction{
|
||||
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
|
||||
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
|
||||
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
|
||||
},
|
||||
LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
|
||||
}
|
||||
|
||||
testCases := [][]string{
|
||||
{"-L", "-p", "udp"},
|
||||
{"-D", "-p", "udp", "-d", "10.0.240.1"},
|
||||
{"-D", "-p", "udp", "--orig-dst", "10.240.0.2", "--dst-nat", "10.0.10.2"},
|
||||
}
|
||||
|
||||
expectErr := []bool{false, false, true}
|
||||
|
||||
for i := range testCases {
|
||||
err := ExecConntrackTool(&fexec, testCases[i]...)
|
||||
|
||||
if expectErr[i] {
|
||||
if err == nil {
|
||||
t.Errorf("expected err, got %v", err)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("expected success, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
execCmd := strings.Join(fcmd.CombinedOutputLog[i], " ")
|
||||
expectCmd := fmt.Sprintf("%s %s", "conntrack", strings.Join(testCases[i], " "))
|
||||
|
||||
if execCmd != expectCmd {
|
||||
t.Errorf("expect execute command: %s, but got: %s", expectCmd, execCmd)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestClearUDPConntrackForIP(t *testing.T) {
|
||||
fcmd := fakeexec.FakeCmd{
|
||||
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
|
||||
func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil },
|
||||
func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil },
|
||||
func() ([]byte, error) {
|
||||
return []byte(""), fmt.Errorf("conntrack v1.4.2 (conntrack-tools): 0 flow entries have been deleted")
|
||||
},
|
||||
func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil },
|
||||
},
|
||||
}
|
||||
fexec := fakeexec.FakeExec{
|
||||
CommandScript: []fakeexec.FakeCommandAction{
|
||||
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
|
||||
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
|
||||
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
|
||||
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
|
||||
},
|
||||
LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
ip string
|
||||
}{
|
||||
{"IPv4 success", "10.240.0.3"},
|
||||
{"IPv4 success", "10.240.0.5"},
|
||||
{"IPv4 simulated error", "10.240.0.4"},
|
||||
{"IPv6 success", "2001:db8::10"},
|
||||
}
|
||||
|
||||
svcCount := 0
|
||||
for _, tc := range testCases {
|
||||
if err := ClearUDPConntrackForIP(&fexec, tc.ip); err != nil {
|
||||
t.Errorf("%s test case:, Unexpected error: %v", tc.name, err)
|
||||
}
|
||||
expectCommand := fmt.Sprintf("conntrack -D --orig-dst %s -p udp", tc.ip) + familyParamStr(IsIPv6String(tc.ip))
|
||||
execCommand := strings.Join(fcmd.CombinedOutputLog[svcCount], " ")
|
||||
if expectCommand != execCommand {
|
||||
t.Errorf("%s test case: Expect command: %s, but executed %s", tc.name, expectCommand, execCommand)
|
||||
}
|
||||
svcCount++
|
||||
}
|
||||
if svcCount != fexec.CommandCalls {
|
||||
t.Errorf("Expect command executed %d times, but got %d", svcCount, fexec.CommandCalls)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClearUDPConntrackForPort(t *testing.T) {
|
||||
fcmd := fakeexec.FakeCmd{
|
||||
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
|
||||
func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil },
|
||||
func() ([]byte, error) {
|
||||
return []byte(""), fmt.Errorf("conntrack v1.4.2 (conntrack-tools): 0 flow entries have been deleted")
|
||||
},
|
||||
func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil },
|
||||
},
|
||||
}
|
||||
fexec := fakeexec.FakeExec{
|
||||
CommandScript: []fakeexec.FakeCommandAction{
|
||||
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
|
||||
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
|
||||
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
|
||||
},
|
||||
LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
port int
|
||||
isIPv6 bool
|
||||
}{
|
||||
{"IPv4, no error", 8080, false},
|
||||
{"IPv4, simulated error", 9090, false},
|
||||
{"IPv6, no error", 6666, true},
|
||||
}
|
||||
svcCount := 0
|
||||
for _, tc := range testCases {
|
||||
err := ClearUDPConntrackForPort(&fexec, tc.port, tc.isIPv6)
|
||||
if err != nil {
|
||||
t.Errorf("%s test case: Unexpected error: %v", tc.name, err)
|
||||
}
|
||||
expectCommand := fmt.Sprintf("conntrack -D -p udp --dport %d", tc.port) + familyParamStr(tc.isIPv6)
|
||||
execCommand := strings.Join(fcmd.CombinedOutputLog[svcCount], " ")
|
||||
if expectCommand != execCommand {
|
||||
t.Errorf("%s test case: Expect command: %s, but executed %s", tc.name, expectCommand, execCommand)
|
||||
}
|
||||
svcCount++
|
||||
}
|
||||
if svcCount != fexec.CommandCalls {
|
||||
t.Errorf("Expect command executed %d times, but got %d", svcCount, fexec.CommandCalls)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClearUDPConntrackForPeers(t *testing.T) {
|
||||
fcmd := fakeexec.FakeCmd{
|
||||
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
|
||||
func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil },
|
||||
func() ([]byte, error) {
|
||||
return []byte(""), fmt.Errorf("conntrack v1.4.2 (conntrack-tools): 0 flow entries have been deleted")
|
||||
},
|
||||
func() ([]byte, error) { return []byte("1 flow entries have been deleted"), nil },
|
||||
},
|
||||
}
|
||||
fexec := fakeexec.FakeExec{
|
||||
CommandScript: []fakeexec.FakeCommandAction{
|
||||
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
|
||||
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
|
||||
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
|
||||
},
|
||||
LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
origin string
|
||||
dest string
|
||||
}{
|
||||
{
|
||||
name: "IPv4 success",
|
||||
origin: "1.2.3.4",
|
||||
dest: "10.20.30.40",
|
||||
},
|
||||
{
|
||||
name: "IPv4 simulated failure",
|
||||
origin: "2.3.4.5",
|
||||
dest: "20.30.40.50",
|
||||
},
|
||||
{
|
||||
name: "IPv6 success",
|
||||
origin: "fd00::600d:f00d",
|
||||
dest: "2001:db8::5",
|
||||
},
|
||||
}
|
||||
svcCount := 0
|
||||
for i, tc := range testCases {
|
||||
err := ClearUDPConntrackForPeers(&fexec, tc.origin, tc.dest)
|
||||
if err != nil {
|
||||
t.Errorf("%s test case: unexpected error: %v", tc.name, err)
|
||||
}
|
||||
expectCommand := fmt.Sprintf("conntrack -D --orig-dst %s --dst-nat %s -p udp", tc.origin, tc.dest) + familyParamStr(IsIPv6String(tc.origin))
|
||||
execCommand := strings.Join(fcmd.CombinedOutputLog[i], " ")
|
||||
if expectCommand != execCommand {
|
||||
t.Errorf("%s test case: Expect command: %s, but executed %s", tc.name, expectCommand, execCommand)
|
||||
}
|
||||
svcCount++
|
||||
}
|
||||
if svcCount != fexec.CommandCalls {
|
||||
t.Errorf("Expect command executed %d times, but got %d", svcCount, fexec.CommandCalls)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsIPv6String(t *testing.T) {
|
||||
testCases := []struct {
|
||||
ip string
|
||||
expectIPv6 bool
|
||||
}{
|
||||
{
|
||||
ip: "127.0.0.1",
|
||||
expectIPv6: false,
|
||||
},
|
||||
{
|
||||
ip: "192.168.0.0",
|
||||
expectIPv6: false,
|
||||
},
|
||||
{
|
||||
ip: "1.2.3.4",
|
||||
expectIPv6: false,
|
||||
},
|
||||
{
|
||||
ip: "bad ip",
|
||||
expectIPv6: false,
|
||||
},
|
||||
{
|
||||
ip: "::1",
|
||||
expectIPv6: true,
|
||||
},
|
||||
{
|
||||
ip: "fd00::600d:f00d",
|
||||
expectIPv6: true,
|
||||
},
|
||||
{
|
||||
ip: "2001:db8::5",
|
||||
expectIPv6: true,
|
||||
},
|
||||
}
|
||||
for i := range testCases {
|
||||
isIPv6 := IsIPv6String(testCases[i].ip)
|
||||
if isIPv6 != testCases[i].expectIPv6 {
|
||||
t.Errorf("[%d] Expect ipv6 %v, got %v", i+1, testCases[i].expectIPv6, isIPv6)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsIPv6(t *testing.T) {
|
||||
testCases := []struct {
|
||||
ip net.IP
|
||||
expectIPv6 bool
|
||||
}{
|
||||
{
|
||||
ip: net.IPv4zero,
|
||||
expectIPv6: false,
|
||||
},
|
||||
{
|
||||
ip: net.IPv4bcast,
|
||||
expectIPv6: false,
|
||||
},
|
||||
{
|
||||
ip: net.ParseIP("127.0.0.1"),
|
||||
expectIPv6: false,
|
||||
},
|
||||
{
|
||||
ip: net.ParseIP("10.20.40.40"),
|
||||
expectIPv6: false,
|
||||
},
|
||||
{
|
||||
ip: net.ParseIP("172.17.3.0"),
|
||||
expectIPv6: false,
|
||||
},
|
||||
{
|
||||
ip: nil,
|
||||
expectIPv6: false,
|
||||
},
|
||||
{
|
||||
ip: net.IPv6loopback,
|
||||
expectIPv6: true,
|
||||
},
|
||||
{
|
||||
ip: net.IPv6zero,
|
||||
expectIPv6: true,
|
||||
},
|
||||
{
|
||||
ip: net.ParseIP("fd00::600d:f00d"),
|
||||
expectIPv6: true,
|
||||
},
|
||||
{
|
||||
ip: net.ParseIP("2001:db8::5"),
|
||||
expectIPv6: true,
|
||||
},
|
||||
}
|
||||
for i := range testCases {
|
||||
isIPv6 := IsIPv6(testCases[i].ip)
|
||||
if isIPv6 != testCases[i].expectIPv6 {
|
||||
t.Errorf("[%d] Expect ipv6 %v, got %v", i+1, testCases[i].expectIPv6, isIPv6)
|
||||
}
|
||||
}
|
||||
}
|
73
vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints.go
generated
vendored
Normal file
@@ -0,0 +1,73 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"fmt"
	"net"
	"strconv"

	"github.com/golang/glog"
)

// IPPart returns just the IP part of an IP or IP:port or endpoint string. If the IP
// part is an IPv6 address enclosed in brackets (e.g. "[fd00:1::5]:9999"),
// then the brackets are stripped as well.
func IPPart(s string) string {
	if ip := net.ParseIP(s); ip != nil {
		// IP address without port
		return s
	}
	// Must be IP:port
	host, _, err := net.SplitHostPort(s)
	if err != nil {
		glog.Errorf("Error parsing '%s': %v", s, err)
		return ""
	}
	// Check if host string is a valid IP address
	if ip := net.ParseIP(host); ip != nil {
		return ip.String()
	}
	glog.Errorf("invalid IP part '%s'", host)
	return ""
}

// PortPart returns just the port part of an IP:port or endpoint string.
func PortPart(s string) (int, error) {
	// Must be IP:port
	_, port, err := net.SplitHostPort(s)
	if err != nil {
		glog.Errorf("Error parsing '%s': %v", s, err)
		return -1, err
	}
	portNumber, err := strconv.Atoi(port)
	if err != nil {
		glog.Errorf("Error parsing '%s': %v", port, err)
		return -1, err
	}
	return portNumber, nil
}

// ToCIDR returns a host address of the form <ip-address>/32 for
// IPv4 and <ip-address>/128 for IPv6.
func ToCIDR(ip net.IP) string {
	size := 32
	if ip.To4() == nil {
		size = 128
	}
	return fmt.Sprintf("%s/%d", ip.String(), size)
}
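// Editor's sketch (not part of the vendored file) showing what the helpers
// above return for typical inputs; the expected results are noted inline.
func exampleEndpointHelpers() {
	fmt.Println(IPPart("10.2.3.4:9999"))      // 10.2.3.4
	fmt.Println(IPPart("[2001:db8::5]:9999")) // 2001:db8::5 (brackets stripped)
	if port, err := PortPart("10.2.3.4:9999"); err == nil {
		fmt.Println(port) // 9999
	}
	fmt.Println(ToCIDR(net.ParseIP("10.2.3.4")))    // 10.2.3.4/32
	fmt.Println(ToCIDR(net.ParseIP("2001:db8::5"))) // 2001:db8::5/128
}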
69
vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints_test.go
generated
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIPPart(t *testing.T) {
|
||||
const noError = ""
|
||||
|
||||
testCases := []struct {
|
||||
endpoint string
|
||||
expectedIP string
|
||||
expectedError string
|
||||
}{
|
||||
{"1.2.3.4", "1.2.3.4", noError},
|
||||
{"1.2.3.4:9999", "1.2.3.4", noError},
|
||||
{"2001:db8::1:1", "2001:db8::1:1", noError},
|
||||
{"[2001:db8::2:2]:9999", "2001:db8::2:2", noError},
|
||||
{"1.2.3.4::9999", "", "too many colons"},
|
||||
{"1.2.3.4:[0]", "", "unexpected '[' in address"},
|
||||
{"1.2.3:8080", "", "invalid ip part"},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
ip := IPPart(tc.endpoint)
|
||||
if tc.expectedError == noError {
|
||||
if ip != tc.expectedIP {
|
||||
t.Errorf("Unexpected IP for %s: Expected: %s, Got %s", tc.endpoint, tc.expectedIP, ip)
|
||||
}
|
||||
} else if ip != "" {
|
||||
t.Errorf("Error did not occur for %s, expected: '%s' error", tc.endpoint, tc.expectedError)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestToCIDR(t *testing.T) {
|
||||
testCases := []struct {
|
||||
ip string
|
||||
expectedAddr string
|
||||
}{
|
||||
{"1.2.3.4", "1.2.3.4/32"},
|
||||
{"2001:db8::1:1", "2001:db8::1:1/128"},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
ip := net.ParseIP(tc.ip)
|
||||
addr := ToCIDR(ip)
|
||||
if addr != tc.expectedAddr {
|
||||
t.Errorf("Unexpected host address for %s: Expected: %s, Got %s", tc.ip, tc.expectedAddr, addr)
|
||||
}
|
||||
}
|
||||
}
|
67
vendor/k8s.io/kubernetes/pkg/proxy/util/port.go
generated
vendored
Normal file
@@ -0,0 +1,67 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"fmt"
	"net"
	"strconv"

	"github.com/golang/glog"
)

// LocalPort describes a port on a specific IP address and protocol.
type LocalPort struct {
	// Description is the identity message of a given local port.
	Description string
	// IP is the IP address part of a given local port.
	// If this string is empty, the port binds to all local IP addresses.
	IP string
	// Port is the port part of a given local port.
	Port int
	// Protocol is the protocol part of a given local port.
	// The value is assumed to be lower-case. For example, "udp" not "UDP", "tcp" not "TCP".
	Protocol string
}

func (lp *LocalPort) String() string {
	ipPort := net.JoinHostPort(lp.IP, strconv.Itoa(lp.Port))
	return fmt.Sprintf("%q (%s/%s)", lp.Description, ipPort, lp.Protocol)
}

// Closeable is an interface around closing a port.
type Closeable interface {
	Close() error
}

// PortOpener is an interface around port opening/closing.
// Abstracted out for testing.
type PortOpener interface {
	OpenLocalPort(lp *LocalPort) (Closeable, error)
}

// RevertPorts closes the ports in replacementPortsMap that are not in
// originalPortsMap. In other words, it only closes the ports opened in
// this sync.
func RevertPorts(replacementPortsMap, originalPortsMap map[LocalPort]Closeable) {
	for k, v := range replacementPortsMap {
		// Only close newly opened local ports - leave ones that were open before this update
		if originalPortsMap[k] == nil {
			glog.V(2).Infof("Closing local port %s", k.String())
			v.Close()
		}
	}
}
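// Editor's sketch (not part of the vendored file) of a PortOpener backed by a
// real socket. kube-proxy uses a similar listener-based implementation, but
// the exact upstream type is not shown in this diff.
type listenPortOpener struct{}

// OpenLocalPort binds lp and returns the socket as a Closeable.
func (listenPortOpener) OpenLocalPort(lp *LocalPort) (Closeable, error) {
	addr := net.JoinHostPort(lp.IP, strconv.Itoa(lp.Port))
	switch lp.Protocol {
	case "tcp":
		return net.Listen("tcp", addr) // net.Listener satisfies Closeable
	case "udp":
		return net.ListenPacket("udp", addr) // net.PacketConn satisfies Closeable
	default:
		return nil, fmt.Errorf("unknown protocol %q", lp.Protocol)
	}
}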
143
vendor/k8s.io/kubernetes/pkg/proxy/util/port_test.go
generated
vendored
Normal file
@@ -0,0 +1,143 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package util
|
||||
|
||||
import "testing"
|
||||
|
||||
type fakeClosable struct {
|
||||
closed bool
|
||||
}
|
||||
|
||||
func (c *fakeClosable) Close() error {
|
||||
c.closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestLocalPortString(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
ip string
|
||||
port int
|
||||
protocol string
|
||||
expectedStr string
|
||||
}{
|
||||
{"IPv4 UDP", "1.2.3.4", 9999, "udp", "\"IPv4 UDP\" (1.2.3.4:9999/udp)"},
|
||||
{"IPv4 TCP", "5.6.7.8", 1053, "tcp", "\"IPv4 TCP\" (5.6.7.8:1053/tcp)"},
|
||||
{"IPv6 TCP", "2001:db8::1", 80, "tcp", "\"IPv6 TCP\" ([2001:db8::1]:80/tcp)"},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
lp := &LocalPort{
|
||||
Description: tc.description,
|
||||
IP: tc.ip,
|
||||
Port: tc.port,
|
||||
Protocol: tc.protocol,
|
||||
}
|
||||
str := lp.String()
|
||||
if str != tc.expectedStr {
|
||||
t.Errorf("Unexpected output for %s, expected: %s, got: %s", tc.description, tc.expectedStr, str)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRevertPorts(t *testing.T) {
|
||||
testCases := []struct {
|
||||
replacementPorts []LocalPort
|
||||
existingPorts []LocalPort
|
||||
expectToBeClose []bool
|
||||
}{
|
||||
{
|
||||
replacementPorts: []LocalPort{
|
||||
{Port: 5001},
|
||||
{Port: 5002},
|
||||
{Port: 5003},
|
||||
},
|
||||
existingPorts: []LocalPort{},
|
||||
expectToBeClose: []bool{true, true, true},
|
||||
},
|
||||
{
|
||||
replacementPorts: []LocalPort{},
|
||||
existingPorts: []LocalPort{
|
||||
{Port: 5001},
|
||||
{Port: 5002},
|
||||
{Port: 5003},
|
||||
},
|
||||
expectToBeClose: []bool{},
|
||||
},
|
||||
{
|
||||
replacementPorts: []LocalPort{
|
||||
{Port: 5001},
|
||||
{Port: 5002},
|
||||
{Port: 5003},
|
||||
},
|
||||
existingPorts: []LocalPort{
|
||||
{Port: 5001},
|
||||
{Port: 5002},
|
||||
{Port: 5003},
|
||||
},
|
||||
expectToBeClose: []bool{false, false, false},
|
||||
},
|
||||
{
|
||||
replacementPorts: []LocalPort{
|
||||
{Port: 5001},
|
||||
{Port: 5002},
|
||||
{Port: 5003},
|
||||
},
|
||||
existingPorts: []LocalPort{
|
||||
{Port: 5001},
|
||||
{Port: 5003},
|
||||
},
|
||||
expectToBeClose: []bool{false, true, false},
|
||||
},
|
||||
{
|
||||
replacementPorts: []LocalPort{
|
||||
{Port: 5001},
|
||||
{Port: 5002},
|
||||
{Port: 5003},
|
||||
},
|
||||
existingPorts: []LocalPort{
|
||||
{Port: 5001},
|
||||
{Port: 5002},
|
||||
{Port: 5003},
|
||||
{Port: 5004},
|
||||
},
|
||||
expectToBeClose: []bool{false, false, false},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
replacementPortsMap := make(map[LocalPort]Closeable)
|
||||
for _, lp := range tc.replacementPorts {
|
||||
replacementPortsMap[lp] = &fakeClosable{}
|
||||
}
|
||||
existingPortsMap := make(map[LocalPort]Closeable)
|
||||
for _, lp := range tc.existingPorts {
|
||||
existingPortsMap[lp] = &fakeClosable{}
|
||||
}
|
||||
RevertPorts(replacementPortsMap, existingPortsMap)
|
||||
for j, expectation := range tc.expectToBeClose {
|
||||
if replacementPortsMap[tc.replacementPorts[j]].(*fakeClosable).closed != expectation {
|
||||
t.Errorf("Expect replacement localport %v to be %v in test case %v", tc.replacementPorts[j], expectation, i)
|
||||
}
|
||||
}
|
||||
for _, lp := range tc.existingPorts {
|
||||
if existingPortsMap[lp].(*fakeClosable).closed {
|
||||
t.Errorf("Expect existing localport %v to be false in test case %v", lp, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
58
vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"net"

	"k8s.io/apimachinery/pkg/types"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/helper"

	"github.com/golang/glog"
)

// IsLocalIP checks whether ip is an address assigned to one of the local
// network interfaces.
func IsLocalIP(ip string) (bool, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return false, err
	}
	for i := range addrs {
		intf, _, err := net.ParseCIDR(addrs[i].String())
		if err != nil {
			return false, err
		}
		if net.ParseIP(ip).Equal(intf) {
			return true, nil
		}
	}
	return false, nil
}

// ShouldSkipService reports whether a service should not be proxied at all,
// i.e. it is headless (no cluster IP) or of type ExternalName.
func ShouldSkipService(svcName types.NamespacedName, service *api.Service) bool {
	// if ClusterIP is "None" or empty, skip proxying
	if !helper.IsServiceIPSet(service) {
		glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
		return true
	}
	// Even if ClusterIP is set, ServiceTypeExternalName services don't get proxied
	if service.Spec.Type == api.ServiceTypeExternalName {
		glog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName)
		return true
	}
	return false
}
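// Editor's sketch (not part of the vendored file) of how a proxier loop might
// consult ShouldSkipService before programming rules for a service.
func exampleFilterServices(services []*api.Service) []*api.Service {
	var keep []*api.Service
	for _, svc := range services {
		name := types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name}
		if ShouldSkipService(name, svc) {
			continue // headless or ExternalName services are not proxied
		}
		keep = append(keep, svc)
	}
	return keep
}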
111
vendor/k8s.io/kubernetes/pkg/proxy/util/utils_test.go
generated
vendored
Normal file
@@ -0,0 +1,111 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
)
|
||||
|
||||
func TestShouldSkipService(t *testing.T) {
|
||||
testCases := []struct {
|
||||
service *api.Service
|
||||
svcName types.NamespacedName
|
||||
shouldSkip bool
|
||||
}{
|
||||
{
|
||||
// Cluster IP is None
|
||||
service: &api.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
|
||||
Spec: api.ServiceSpec{
|
||||
ClusterIP: api.ClusterIPNone,
|
||||
},
|
||||
},
|
||||
svcName: types.NamespacedName{Namespace: "foo", Name: "bar"},
|
||||
shouldSkip: true,
|
||||
},
|
||||
{
|
||||
// Cluster IP is empty
|
||||
service: &api.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
|
||||
Spec: api.ServiceSpec{
|
||||
ClusterIP: "",
|
||||
},
|
||||
},
|
||||
svcName: types.NamespacedName{Namespace: "foo", Name: "bar"},
|
||||
shouldSkip: true,
|
||||
},
|
||||
{
|
||||
// ExternalName type service
|
||||
service: &api.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
|
||||
Spec: api.ServiceSpec{
|
||||
ClusterIP: "1.2.3.4",
|
||||
Type: api.ServiceTypeExternalName,
|
||||
},
|
||||
},
|
||||
svcName: types.NamespacedName{Namespace: "foo", Name: "bar"},
|
||||
shouldSkip: true,
|
||||
},
|
||||
{
|
||||
// ClusterIP type service with ClusterIP set
|
||||
service: &api.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
|
||||
Spec: api.ServiceSpec{
|
||||
ClusterIP: "1.2.3.4",
|
||||
Type: api.ServiceTypeClusterIP,
|
||||
},
|
||||
},
|
||||
svcName: types.NamespacedName{Namespace: "foo", Name: "bar"},
|
||||
shouldSkip: false,
|
||||
},
|
||||
{
|
||||
// NodePort type service with ClusterIP set
|
||||
service: &api.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
|
||||
Spec: api.ServiceSpec{
|
||||
ClusterIP: "1.2.3.4",
|
||||
Type: api.ServiceTypeNodePort,
|
||||
},
|
||||
},
|
||||
svcName: types.NamespacedName{Namespace: "foo", Name: "bar"},
|
||||
shouldSkip: false,
|
||||
},
|
||||
{
|
||||
// LoadBalancer type service with ClusterIP set
|
||||
service: &api.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
|
||||
Spec: api.ServiceSpec{
|
||||
ClusterIP: "1.2.3.4",
|
||||
Type: api.ServiceTypeLoadBalancer,
|
||||
},
|
||||
},
|
||||
svcName: types.NamespacedName{Namespace: "foo", Name: "bar"},
|
||||
shouldSkip: false,
|
||||
},
|
||||
}
|
||||
|
||||
for i := range testCases {
|
||||
skip := ShouldSkipService(testCases[i].svcName, testCases[i].service)
|
||||
if skip != testCases[i].shouldSkip {
|
||||
t.Errorf("case %d: expect %v, got %v", i, testCases[i].shouldSkip, skip)
|
||||
}
|
||||
}
|
||||
}
|
78
vendor/k8s.io/kubernetes/pkg/proxy/winkernel/BUILD
generated
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"metrics.go",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:windows_amd64": [
|
||||
"proxier.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
importpath = "k8s.io/kubernetes/pkg/proxy/winkernel",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:windows_amd64": [
|
||||
"//pkg/api/service:go_default_library",
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/core/helper:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/proxy:go_default_library",
|
||||
"//pkg/proxy/healthcheck:go_default_library",
|
||||
"//pkg/util/async:go_default_library",
|
||||
"//vendor/github.com/Microsoft/hcsshim:go_default_library",
|
||||
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = select({
|
||||
"@io_bazel_rules_go//go/platform:windows_amd64": [
|
||||
"proxier_test.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
importpath = "k8s.io/kubernetes/pkg/proxy/winkernel",
|
||||
library = ":go_default_library",
|
||||
deps = select({
|
||||
"@io_bazel_rules_go//go/platform:windows_amd64": [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/proxy:go_default_library",
|
||||
"//pkg/util/async:go_default_library",
|
||||
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec/testing:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
3
vendor/k8s.io/kubernetes/pkg/proxy/winkernel/OWNERS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
reviewers:
- dineshgovindasamy
- madhanrm
50
vendor/k8s.io/kubernetes/pkg/proxy/winkernel/metrics.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winkernel

import (
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

const kubeProxySubsystem = "kubeproxy"

var (
	// SyncProxyRulesLatency is a histogram of how long each
	// SyncProxyRules pass takes, in microseconds.
	SyncProxyRulesLatency = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Subsystem: kubeProxySubsystem,
			Name:      "sync_proxy_rules_latency_microseconds",
			Help:      "SyncProxyRules latency",
			Buckets:   prometheus.ExponentialBuckets(1000, 2, 15),
		},
	)
)

var registerMetricsOnce sync.Once

// RegisterMetrics registers the kube-proxy metrics with the default
// Prometheus registry; it is safe to call more than once.
func RegisterMetrics() {
	registerMetricsOnce.Do(func() {
		prometheus.MustRegister(SyncProxyRulesLatency)
	})
}

// sinceInMicroseconds gets the time since the specified start in microseconds.
func sinceInMicroseconds(start time.Time) float64 {
	return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
}
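// Editor's sketch (not part of the vendored file) showing the intended use of
// the histogram: record how long one rule-sync pass took.
func exampleObserveSync(sync func()) {
	RegisterMetrics()
	start := time.Now()
	sync()
	SyncProxyRulesLatency.Observe(sinceInMicroseconds(start))
}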
1134
vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go
generated
vendored
Normal file
File diff suppressed because it is too large
2031
vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
65
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/BUILD
generated
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"loadbalancer.go",
|
||||
"proxier.go",
|
||||
"proxysocket.go",
|
||||
"roundrobin.go",
|
||||
"types.go",
|
||||
"udp_server.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/proxy/winuserspace",
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/core/helper:go_default_library",
|
||||
"//pkg/proxy:go_default_library",
|
||||
"//pkg/util/ipconfig:go_default_library",
|
||||
"//pkg/util/netsh:go_default_library",
|
||||
"//pkg/util/slice:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/miekg/dns:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"proxier_test.go",
|
||||
"roundrobin_test.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/proxy/winuserspace",
|
||||
library = ":go_default_library",
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/proxy:go_default_library",
|
||||
"//pkg/util/netsh/testing:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
33
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/loadbalancer.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
	"net"

	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/proxy"
)

// LoadBalancer is an interface for distributing incoming requests to service endpoints.
type LoadBalancer interface {
	// NextEndpoint returns the endpoint to handle a request for the given
	// service-port and source address.
	NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error)
	// NewService registers a service-port with the balancer, with the given
	// session affinity settings.
	NewService(service proxy.ServicePortName, sessionAffinityType api.ServiceAffinity, stickyMaxAgeMinutes int) error
	// DeleteService removes all balancer state for a service-port.
	DeleteService(service proxy.ServicePortName)
	// CleanupStaleStickySessions drops expired sticky-session records.
	CleanupStaleStickySessions(service proxy.ServicePortName)
}
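// Editor's toy sketch (not part of the vendored file) of the smallest useful
// LoadBalancer: it always returns one fixed endpoint and ignores session
// affinity. The real implementation in this package is the round-robin
// balancer in roundrobin.go.
var _ LoadBalancer = &singleEndpointBalancer{}

type singleEndpointBalancer struct{ endpoint string }

func (b *singleEndpointBalancer) NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error) {
	return b.endpoint, nil
}

func (b *singleEndpointBalancer) NewService(service proxy.ServicePortName, sessionAffinityType api.ServiceAffinity, stickyMaxAgeMinutes int) error {
	return nil
}

func (b *singleEndpointBalancer) DeleteService(service proxy.ServicePortName) {}

func (b *singleEndpointBalancer) CleanupStaleStickySessions(service proxy.ServicePortName) {}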
481
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier.go
generated
vendored
Normal file
@@ -0,0 +1,481 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package winuserspace
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilnet "k8s.io/apimachinery/pkg/util/net"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/apis/core/helper"
|
||||
"k8s.io/kubernetes/pkg/proxy"
|
||||
"k8s.io/kubernetes/pkg/util/netsh"
|
||||
)
|
||||
|
||||
const allAvailableInterfaces string = ""
|
||||
|
||||
type portal struct {
|
||||
ip string
|
||||
port int
|
||||
isExternal bool
|
||||
}
|
||||
|
||||
type serviceInfo struct {
|
||||
isAliveAtomic int32 // Only access this with atomic ops
|
||||
portal portal
|
||||
protocol api.Protocol
|
||||
socket proxySocket
|
||||
timeout time.Duration
|
||||
activeClients *clientCache
|
||||
dnsClients *dnsClientCache
|
||||
sessionAffinityType api.ServiceAffinity
|
||||
}
|
||||
|
||||
func (info *serviceInfo) setAlive(b bool) {
|
||||
var i int32
|
||||
if b {
|
||||
i = 1
|
||||
}
|
||||
atomic.StoreInt32(&info.isAliveAtomic, i)
|
||||
}
|
||||
|
||||
func (info *serviceInfo) isAlive() bool {
|
||||
return atomic.LoadInt32(&info.isAliveAtomic) != 0
|
||||
}
|
||||
|
||||
func logTimeout(err error) bool {
|
||||
if e, ok := err.(net.Error); ok {
|
||||
if e.Timeout() {
|
||||
glog.V(3).Infof("connection to endpoint closed due to inactivity")
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Proxier is a simple proxy for TCP and UDP connections between a
// localhost:lport and the services that provide the actual implementations.
|
||||
type Proxier struct {
|
||||
loadBalancer LoadBalancer
|
||||
mu sync.Mutex // protects serviceMap
|
||||
serviceMap map[ServicePortPortalName]*serviceInfo
|
||||
syncPeriod time.Duration
|
||||
udpIdleTimeout time.Duration
|
||||
portMapMutex sync.Mutex
|
||||
portMap map[portMapKey]*portMapValue
|
||||
numProxyLoops int32 // use atomic ops to access this; mostly for testing
|
||||
netsh netsh.Interface
|
||||
hostIP net.IP
|
||||
}
|
||||
|
||||
// assert Proxier is a ProxyProvider
|
||||
var _ proxy.ProxyProvider = &Proxier{}
|
||||
|
||||
// A key for the portMap. The ip has to be a string because slices can't be map
|
||||
// keys.
|
||||
type portMapKey struct {
|
||||
ip string
|
||||
port int
|
||||
protocol api.Protocol
|
||||
}
|
||||
|
||||
func (k *portMapKey) String() string {
|
||||
return fmt.Sprintf("%s:%d/%s", k.ip, k.port, k.protocol)
|
||||
}
|
||||
|
||||
// A value for the portMap
|
||||
type portMapValue struct {
|
||||
owner ServicePortPortalName
|
||||
socket interface {
|
||||
Close() error
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrProxyOnLocalhost is returned by NewProxier if the user requests a proxier on
|
||||
// the loopback address. May be checked for by callers of NewProxier to know whether
|
||||
// the caller provided invalid input.
|
||||
ErrProxyOnLocalhost = fmt.Errorf("cannot proxy on localhost")
|
||||
)
|
||||
|
||||
// Used below.
|
||||
var localhostIPv4 = net.ParseIP("127.0.0.1")
|
||||
var localhostIPv6 = net.ParseIP("::1")
|
||||
|
||||
// NewProxier returns a new Proxier given a LoadBalancer and an address on
|
||||
// which to listen. It is assumed that there is only a single Proxier active
|
||||
// on a machine. An error will be returned if the proxier cannot be started
|
||||
// due to an invalid ListenIP (loopback).
|
||||
func NewProxier(loadBalancer LoadBalancer, listenIP net.IP, netsh netsh.Interface, pr utilnet.PortRange, syncPeriod, udpIdleTimeout time.Duration) (*Proxier, error) {
|
||||
if listenIP.Equal(localhostIPv4) || listenIP.Equal(localhostIPv6) {
|
||||
return nil, ErrProxyOnLocalhost
|
||||
}
|
||||
|
||||
hostIP, err := utilnet.ChooseHostInterface()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to select a host interface: %v", err)
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Setting proxy IP to %v", hostIP)
|
||||
return createProxier(loadBalancer, listenIP, netsh, hostIP, syncPeriod, udpIdleTimeout)
|
||||
}
|
||||
|
||||
func createProxier(loadBalancer LoadBalancer, listenIP net.IP, netsh netsh.Interface, hostIP net.IP, syncPeriod, udpIdleTimeout time.Duration) (*Proxier, error) {
|
||||
return &Proxier{
|
||||
loadBalancer: loadBalancer,
|
||||
serviceMap: make(map[ServicePortPortalName]*serviceInfo),
|
||||
portMap: make(map[portMapKey]*portMapValue),
|
||||
syncPeriod: syncPeriod,
|
||||
udpIdleTimeout: udpIdleTimeout,
|
||||
netsh: netsh,
|
||||
hostIP: hostIP,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Sync is called to immediately synchronize the proxier state
|
||||
func (proxier *Proxier) Sync() {
|
||||
proxier.cleanupStaleStickySessions()
|
||||
}
|
||||
|
||||
// SyncLoop runs periodic work. This is expected to run as a goroutine or as the main loop of the app. It does not return.
|
||||
func (proxier *Proxier) SyncLoop() {
|
||||
t := time.NewTicker(proxier.syncPeriod)
|
||||
defer t.Stop()
|
||||
for {
|
||||
<-t.C
|
||||
glog.V(6).Infof("Periodic sync")
|
||||
proxier.Sync()
|
||||
}
|
||||
}
|
||||
|
||||
// cleanupStaleStickySessions cleans up any stale sticky session records in the hash map.
|
||||
func (proxier *Proxier) cleanupStaleStickySessions() {
|
||||
proxier.mu.Lock()
|
||||
defer proxier.mu.Unlock()
|
||||
servicePortNameMap := make(map[proxy.ServicePortName]bool)
|
||||
for name := range proxier.serviceMap {
|
||||
servicePortName := proxy.ServicePortName{
|
||||
NamespacedName: types.NamespacedName{
|
||||
Namespace: name.Namespace,
|
||||
Name: name.Name,
|
||||
},
|
||||
Port: name.Port,
|
||||
}
|
||||
if !servicePortNameMap[servicePortName] {
|
||||
// ensure cleanup sticky sessions only gets called once per serviceportname
|
||||
servicePortNameMap[servicePortName] = true
|
||||
proxier.loadBalancer.CleanupStaleStickySessions(servicePortName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This assumes proxier.mu is not locked.
|
||||
func (proxier *Proxier) stopProxy(service ServicePortPortalName, info *serviceInfo) error {
|
||||
proxier.mu.Lock()
|
||||
defer proxier.mu.Unlock()
|
||||
return proxier.stopProxyInternal(service, info)
|
||||
}
|
||||
|
||||
// This assumes proxier.mu is locked.
|
||||
func (proxier *Proxier) stopProxyInternal(service ServicePortPortalName, info *serviceInfo) error {
|
||||
delete(proxier.serviceMap, service)
|
||||
info.setAlive(false)
|
||||
err := info.socket.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
func (proxier *Proxier) getServiceInfo(service ServicePortPortalName) (*serviceInfo, bool) {
|
||||
proxier.mu.Lock()
|
||||
defer proxier.mu.Unlock()
|
||||
info, ok := proxier.serviceMap[service]
|
||||
return info, ok
|
||||
}
|
||||
|
||||
func (proxier *Proxier) setServiceInfo(service ServicePortPortalName, info *serviceInfo) {
|
||||
proxier.mu.Lock()
|
||||
defer proxier.mu.Unlock()
|
||||
proxier.serviceMap[service] = info
|
||||
}
|
||||
|
||||
// addServicePortPortal starts listening for a new service, returning the serviceInfo.
|
||||
// The timeout only applies to UDP connections, for now.
|
||||
func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPortalName, protocol api.Protocol, listenIP string, port int, timeout time.Duration) (*serviceInfo, error) {
|
||||
var serviceIP net.IP
|
||||
if listenIP != allAvailableInterfaces {
|
||||
if serviceIP = net.ParseIP(listenIP); serviceIP == nil {
|
||||
return nil, fmt.Errorf("could not parse ip '%q'", listenIP)
|
||||
}
|
||||
// add the IP address. Node port binds to all interfaces.
|
||||
args := proxier.netshIpv4AddressAddArgs(serviceIP)
|
||||
if existed, err := proxier.netsh.EnsureIPAddress(args, serviceIP); err != nil {
|
||||
return nil, err
|
||||
} else if !existed {
|
||||
glog.V(3).Infof("Added ip address to fowarder interface for service %q at %s:%d/%s", servicePortPortalName, listenIP, port, protocol)
|
||||
}
|
||||
}
|
||||
|
||||
// add the listener, proxy
|
||||
sock, err := newProxySocket(protocol, serviceIP, port)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
si := &serviceInfo{
|
||||
isAliveAtomic: 1,
|
||||
portal: portal{
|
||||
ip: listenIP,
|
||||
port: port,
|
||||
isExternal: false,
|
||||
},
|
||||
protocol: protocol,
|
||||
socket: sock,
|
||||
timeout: timeout,
|
||||
activeClients: newClientCache(),
|
||||
dnsClients: newDNSClientCache(),
|
||||
sessionAffinityType: api.ServiceAffinityNone, // default
|
||||
}
|
||||
proxier.setServiceInfo(servicePortPortalName, si)
|
||||
|
||||
glog.V(2).Infof("Proxying for service %q at %s:%d/%s", servicePortPortalName, listenIP, port, protocol)
|
||||
go func(service ServicePortPortalName, proxier *Proxier) {
|
||||
defer runtime.HandleCrash()
|
||||
atomic.AddInt32(&proxier.numProxyLoops, 1)
|
||||
sock.ProxyLoop(service, si, proxier)
|
||||
atomic.AddInt32(&proxier.numProxyLoops, -1)
|
||||
}(servicePortPortalName, proxier)
|
||||
|
||||
return si, nil
|
||||
}
|
||||
|
||||
func (proxier *Proxier) closeServicePortPortal(servicePortPortalName ServicePortPortalName, info *serviceInfo) error {
|
||||
// turn off the proxy
|
||||
if err := proxier.stopProxy(servicePortPortalName, info); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// close the PortalProxy by deleting the service IP address
|
||||
if info.portal.ip != allAvailableInterfaces {
|
||||
serviceIP := net.ParseIP(info.portal.ip)
|
||||
args := proxier.netshIpv4AddressDeleteArgs(serviceIP)
|
||||
if err := proxier.netsh.DeleteIPAddress(args); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getListenIPPortMap returns a slice of all listen IPs for a service.
|
||||
func getListenIPPortMap(service *api.Service, listenPort int, nodePort int) map[string]int {
|
||||
listenIPPortMap := make(map[string]int)
|
||||
listenIPPortMap[service.Spec.ClusterIP] = listenPort
|
||||
|
||||
for _, ip := range service.Spec.ExternalIPs {
|
||||
listenIPPortMap[ip] = listenPort
|
||||
}
|
||||
|
||||
for _, ingress := range service.Status.LoadBalancer.Ingress {
|
||||
listenIPPortMap[ingress.IP] = listenPort
|
||||
}
|
||||
|
||||
if nodePort != 0 {
|
||||
listenIPPortMap[allAvailableInterfaces] = nodePort
|
||||
}
|
||||
|
||||
return listenIPPortMap
|
||||
}
|
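// Editor's worked example (not part of the vendored file): given a service
// with ClusterIP 10.0.0.10, one external IP 192.0.2.7, a load-balancer
// ingress IP 203.0.113.9, port 80 and node port 30080, getListenIPPortMap
// returns
//
//	{"10.0.0.10": 80, "192.0.2.7": 80, "203.0.113.9": 80, "": 30080}
//
// where the empty key is allAvailableInterfaces, i.e. the node-port listener
// binds on all local addresses.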
||||
|
||||
func (proxier *Proxier) mergeService(service *api.Service) map[ServicePortPortalName]bool {
|
||||
if service == nil {
|
||||
return nil
|
||||
}
|
||||
svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
|
||||
if !helper.IsServiceIPSet(service) {
|
||||
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
|
||||
return nil
|
||||
}
|
||||
existingPortPortals := make(map[ServicePortPortalName]bool)
|
||||
|
||||
for i := range service.Spec.Ports {
|
||||
servicePort := &service.Spec.Ports[i]
|
||||
// create a slice of all the source IPs to use for service port portals
|
||||
listenIPPortMap := getListenIPPortMap(service, int(servicePort.Port), int(servicePort.NodePort))
|
||||
protocol := servicePort.Protocol
|
||||
|
||||
for listenIP, listenPort := range listenIPPortMap {
|
||||
servicePortPortalName := ServicePortPortalName{
|
||||
NamespacedName: svcName,
|
||||
Port: servicePort.Name,
|
||||
PortalIPName: listenIP,
|
||||
}
|
||||
existingPortPortals[servicePortPortalName] = true
|
||||
info, exists := proxier.getServiceInfo(servicePortPortalName)
|
||||
if exists && sameConfig(info, service, protocol, listenPort) {
|
||||
// Nothing changed.
|
||||
continue
|
||||
}
|
||||
if exists {
|
||||
glog.V(4).Infof("Something changed for service %q: stopping it", servicePortPortalName)
|
||||
if err := proxier.closeServicePortPortal(servicePortPortalName, info); err != nil {
|
||||
glog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err)
|
||||
}
|
||||
}
|
||||
glog.V(1).Infof("Adding new service %q at %s:%d/%s", servicePortPortalName, listenIP, listenPort, protocol)
|
||||
info, err := proxier.addServicePortPortal(servicePortPortalName, protocol, listenIP, listenPort, proxier.udpIdleTimeout)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to start proxy for %q: %v", servicePortPortalName, err)
|
||||
continue
|
||||
}
|
||||
info.sessionAffinityType = service.Spec.SessionAffinity
|
||||
glog.V(10).Infof("info: %#v", info)
|
||||
}
|
||||
if len(listenIPPortMap) > 0 {
|
||||
// only one loadbalancer per service port portal
|
||||
servicePortName := proxy.ServicePortName{
|
||||
NamespacedName: types.NamespacedName{
|
||||
Namespace: service.Namespace,
|
||||
Name: service.Name,
|
||||
},
|
||||
Port: servicePort.Name,
|
||||
}
|
||||
timeoutSeconds := 0
|
||||
if service.Spec.SessionAffinity == api.ServiceAffinityClientIP {
|
||||
timeoutSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
|
||||
}
|
||||
proxier.loadBalancer.NewService(servicePortName, service.Spec.SessionAffinity, timeoutSeconds)
|
||||
}
|
||||
}
|
||||
|
||||
return existingPortPortals
|
||||
}
|
||||
|
||||
func (proxier *Proxier) unmergeService(service *api.Service, existingPortPortals map[ServicePortPortalName]bool) {
|
||||
if service == nil {
|
||||
return
|
||||
}
|
||||
svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
|
||||
if !helper.IsServiceIPSet(service) {
|
||||
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
|
||||
return
|
||||
}
|
||||
|
||||
servicePortNameMap := make(map[proxy.ServicePortName]bool)
|
||||
for name := range existingPortPortals {
|
||||
servicePortName := proxy.ServicePortName{
|
||||
NamespacedName: types.NamespacedName{
|
||||
Namespace: name.Namespace,
|
||||
Name: name.Name,
|
||||
},
|
||||
Port: name.Port,
|
||||
}
|
||||
servicePortNameMap[servicePortName] = true
|
||||
}
|
||||
|
||||
for i := range service.Spec.Ports {
|
||||
servicePort := &service.Spec.Ports[i]
|
||||
serviceName := proxy.ServicePortName{NamespacedName: svcName, Port: servicePort.Name}
|
||||
// create a slice of all the source IPs to use for service port portals
|
||||
listenIPPortMap := getListenIPPortMap(service, int(servicePort.Port), int(servicePort.NodePort))
|
||||
|
||||
for listenIP := range listenIPPortMap {
|
||||
servicePortPortalName := ServicePortPortalName{
|
||||
NamespacedName: svcName,
|
||||
Port: servicePort.Name,
|
||||
PortalIPName: listenIP,
|
||||
}
|
||||
if existingPortPortals[servicePortPortalName] {
|
||||
continue
|
||||
}
|
||||
|
||||
glog.V(1).Infof("Stopping service %q", servicePortPortalName)
|
||||
info, exists := proxier.getServiceInfo(servicePortPortalName)
|
||||
if !exists {
|
||||
glog.Errorf("Service %q is being removed but doesn't exist", servicePortPortalName)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := proxier.closeServicePortPortal(servicePortPortalName, info); err != nil {
|
||||
glog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Only delete load balancer if all listen ips per name/port show inactive.
|
||||
if !servicePortNameMap[serviceName] {
|
||||
proxier.loadBalancer.DeleteService(serviceName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (proxier *Proxier) OnServiceAdd(service *api.Service) {
|
||||
_ = proxier.mergeService(service)
|
||||
}
|
||||
|
||||
func (proxier *Proxier) OnServiceUpdate(oldService, service *api.Service) {
|
||||
existingPortPortals := proxier.mergeService(service)
|
||||
proxier.unmergeService(oldService, existingPortPortals)
|
||||
}
|
||||
|
||||
func (proxier *Proxier) OnServiceDelete(service *api.Service) {
|
||||
proxier.unmergeService(service, map[ServicePortPortalName]bool{})
|
||||
}
|
||||
|
||||
func (proxier *Proxier) OnServiceSynced() {
|
||||
}
|
||||
|
||||
func sameConfig(info *serviceInfo, service *api.Service, protocol api.Protocol, listenPort int) bool {
|
||||
return info.protocol == protocol && info.portal.port == listenPort && info.sessionAffinityType == service.Spec.SessionAffinity
|
||||
}
|
||||
|
||||
func isTooManyFDsError(err error) bool {
|
||||
return strings.Contains(err.Error(), "too many open files")
|
||||
}
|
||||
|
||||
func isClosedError(err error) bool {
|
||||
// A brief discussion about handling closed error here:
|
||||
// https://code.google.com/p/go/issues/detail?id=4373#c14
|
||||
// TODO: maybe create a stoppable TCP listener that returns a StoppedError
|
||||
return strings.HasSuffix(err.Error(), "use of closed network connection")
|
||||
}
|
||||
|
||||
func (proxier *Proxier) netshIpv4AddressAddArgs(destIP net.IP) []string {
|
||||
intName := proxier.netsh.GetInterfaceToAddIP()
|
||||
args := []string{
|
||||
"interface", "ipv4", "add", "address",
|
||||
"name=" + intName,
|
||||
"address=" + destIP.String(),
|
||||
}
|
||||
|
||||
return args
|
||||
}
|
||||
|
||||
func (proxier *Proxier) netshIpv4AddressDeleteArgs(destIP net.IP) []string {
|
||||
intName := proxier.netsh.GetInterfaceToAddIP()
|
||||
args := []string{
|
||||
"interface", "ipv4", "delete", "address",
|
||||
"name=" + intName,
|
||||
"address=" + destIP.String(),
|
||||
}
|
||||
|
||||
return args
|
||||
}
|
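// Editor's worked example (not part of the vendored file): for destIP
// 10.0.0.10 the argument builders above correspond to the Windows commands
//
//	netsh interface ipv4 add address name=<interface> address=10.0.0.10
//	netsh interface ipv4 delete address name=<interface> address=10.0.0.10
//
// where <interface> is whatever proxier.netsh.GetInterfaceToAddIP() returns.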
928
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier_test.go
generated
vendored
Normal file
@@ -0,0 +1,928 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"strconv"
	"sync/atomic"
	"testing"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/runtime"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/proxy"
	netshtest "k8s.io/kubernetes/pkg/util/netsh/testing"
)

const (
	udpIdleTimeoutForTest = 250 * time.Millisecond
)

func joinHostPort(host string, port int) string {
	return net.JoinHostPort(host, fmt.Sprintf("%d", port))
}

func waitForClosedPortTCP(p *Proxier, proxyPort int) error {
	for i := 0; i < 50; i++ {
		conn, err := net.Dial("tcp", joinHostPort("", proxyPort))
		if err != nil {
			return nil
		}
		conn.Close()
		time.Sleep(1 * time.Millisecond)
	}
	return fmt.Errorf("port %d still open", proxyPort)
}

func waitForClosedPortUDP(p *Proxier, proxyPort int) error {
	for i := 0; i < 50; i++ {
		conn, err := net.Dial("udp", joinHostPort("", proxyPort))
		if err != nil {
			return nil
		}
		conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
		// To detect a closed UDP port: write, then read.
		_, err = conn.Write([]byte("x"))
		if err != nil {
			if e, ok := err.(net.Error); ok && !e.Timeout() {
				return nil
			}
		}
		var buf [4]byte
		_, err = conn.Read(buf[0:])
		if err != nil {
			if e, ok := err.(net.Error); ok && !e.Timeout() {
				return nil
			}
		}
		conn.Close()
		time.Sleep(1 * time.Millisecond)
	}
	return fmt.Errorf("port %d still open", proxyPort)
}
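// Editor's note (not in the original source): the write-then-read dance above
// works because writing to a closed UDP port typically provokes an ICMP
// "port unreachable" reply, which the OS surfaces as a non-timeout error on a
// subsequent read or write of the connected socket; a mere Dial cannot detect
// closure, since UDP is connectionless.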

var tcpServerPort int32
var udpServerPort int32

func TestMain(m *testing.M) {
	// Don't handle panics
	runtime.ReallyCrash = true

	// TCP setup.
	tcp := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		w.Write([]byte(r.URL.Path[1:]))
	}))
	defer tcp.Close()

	u, err := url.Parse(tcp.URL)
	if err != nil {
		panic(fmt.Sprintf("failed to parse: %v", err))
	}
	_, port, err := net.SplitHostPort(u.Host)
	if err != nil {
		panic(fmt.Sprintf("failed to parse: %v", err))
	}
	tcpServerPortValue, err := strconv.Atoi(port)
	if err != nil {
		panic(fmt.Sprintf("failed to atoi(%s): %v", port, err))
	}
	tcpServerPort = int32(tcpServerPortValue)

	// UDP setup.
	udp, err := newUDPEchoServer()
	if err != nil {
		panic(fmt.Sprintf("failed to make a UDP server: %v", err))
	}
	_, port, err = net.SplitHostPort(udp.LocalAddr().String())
	if err != nil {
		panic(fmt.Sprintf("failed to parse: %v", err))
	}
	udpServerPortValue, err := strconv.Atoi(port)
	if err != nil {
		panic(fmt.Sprintf("failed to atoi(%s): %v", port, err))
	}
	udpServerPort = int32(udpServerPortValue)
	go udp.Loop()

	ret := m.Run()
	// It should be safe to call Close() multiple times.
	tcp.Close()
	os.Exit(ret)
}

func testEchoTCP(t *testing.T, address string, port int) {
	path := "aaaaa"
	res, err := http.Get("http://" + address + ":" + fmt.Sprintf("%d", port) + "/" + path)
	if err != nil {
		t.Fatalf("error connecting to server: %v", err)
	}
	defer res.Body.Close()
	data, err := ioutil.ReadAll(res.Body)
	if err != nil {
		t.Errorf("error reading data: %v %v", err, string(data))
	}
	if string(data) != path {
		t.Errorf("expected: %s, got %s", path, string(data))
	}
}

func testEchoUDP(t *testing.T, address string, port int) {
	data := "abc123"

	conn, err := net.Dial("udp", joinHostPort(address, port))
	if err != nil {
		t.Fatalf("error connecting to server: %v", err)
	}
	if _, err := conn.Write([]byte(data)); err != nil {
		t.Fatalf("error sending to server: %v", err)
	}
	var resp [1024]byte
	n, err := conn.Read(resp[0:])
	if err != nil {
		t.Errorf("error receiving data: %v", err)
	}
	if string(resp[0:n]) != data {
		t.Errorf("expected: %s, got %s", data, string(resp[0:n]))
	}
}

func waitForNumProxyLoops(t *testing.T, p *Proxier, want int32) {
	var got int32
	for i := 0; i < 600; i++ {
		got = atomic.LoadInt32(&p.numProxyLoops)
		if got == want {
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
	t.Errorf("expected %d ProxyLoops running, got %d", want, got)
}

func waitForNumProxyClients(t *testing.T, s *serviceInfo, want int, timeout time.Duration) {
	var got int
	now := time.Now()
	deadline := now.Add(timeout)
	for time.Now().Before(deadline) {
		s.activeClients.mu.Lock()
		got = len(s.activeClients.clients)
		s.activeClients.mu.Unlock()
		if got == want {
			return
		}
		time.Sleep(500 * time.Millisecond)
	}
	t.Errorf("expected %d ProxyClients live, got %d", want, got)
}

func getPortNum(t *testing.T, addr string) int {
	_, portStr, err := net.SplitHostPort(addr)
	if err != nil {
		t.Errorf("error getting port from %s", addr)
		return 0
	}
	portNum, err := strconv.Atoi(portStr)
	if err != nil {
		t.Errorf("error getting port from %s", addr)
		return 0
	}

	return portNum
}

func TestTCPProxy(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 1)
}

func TestUDPProxy(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: udpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 1)
}

func TestUDPProxyTimeout(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: udpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	waitForNumProxyLoops(t, p, 1)
	testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	// When connecting to a UDP service endpoint, there should be a Conn for the proxy.
	waitForNumProxyClients(t, svcInfo, 1, time.Second)
	// If the conn has had no activity for serviceInfo.timeout since the last read/write, it should be closed.
	waitForNumProxyClients(t, svcInfo, 0, 2*time.Second)
}

func TestMultiPortProxy(t *testing.T) {
	lb := NewLoadBalancerRR()
	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-p"}, Port: "p"}
	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-q"}, Port: "q"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Protocol: "TCP", Port: tcpServerPort}},
		}},
	})
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: serviceQ.Name, Namespace: serviceQ.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "q", Protocol: "UDP", Port: udpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalNameP := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceP.Namespace, Name: serviceP.Name}, Port: serviceP.Port, PortalIPName: listenIP}
	svcInfoP, err := p.addServicePortPortal(servicePortPortalNameP, "TCP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfoP.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 1)

	servicePortPortalNameQ := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceQ.Namespace, Name: serviceQ.Name}, Port: serviceQ.Port, PortalIPName: listenIP}
	svcInfoQ, err := p.addServicePortPortal(servicePortPortalNameQ, "UDP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfoQ.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 2)
}

func TestMultiPortOnServiceAdd(t *testing.T) {
	lb := NewLoadBalancerRR()
	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "q"}
	serviceX := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "x"}

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	p.OnServiceAdd(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "0.0.0.0", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     0,
			Protocol: "TCP",
		}, {
			Name:     "q",
			Port:     0,
			Protocol: "UDP",
		}}},
	})
	waitForNumProxyLoops(t, p, 2)

	servicePortPortalNameP := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceP.Namespace, Name: serviceP.Name}, Port: serviceP.Port, PortalIPName: listenIP}
	svcInfo, exists := p.getServiceInfo(servicePortPortalNameP)
	if !exists {
		t.Fatalf("can't find serviceInfo for %s", servicePortPortalNameP)
	}
	if svcInfo.portal.ip != "0.0.0.0" || svcInfo.portal.port != 0 || svcInfo.protocol != "TCP" {
		t.Errorf("unexpected serviceInfo for %s: %#v", serviceP, svcInfo)
	}

	servicePortPortalNameQ := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceQ.Namespace, Name: serviceQ.Name}, Port: serviceQ.Port, PortalIPName: listenIP}
	svcInfo, exists = p.getServiceInfo(servicePortPortalNameQ)
	if !exists {
		t.Fatalf("can't find serviceInfo for %s", servicePortPortalNameQ)
	}
	if svcInfo.portal.ip != "0.0.0.0" || svcInfo.portal.port != 0 || svcInfo.protocol != "UDP" {
		t.Errorf("unexpected serviceInfo for %s: %#v", serviceQ, svcInfo)
	}

	servicePortPortalNameX := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceX.Namespace, Name: serviceX.Name}, Port: serviceX.Port, PortalIPName: listenIP}
	svcInfo, exists = p.getServiceInfo(servicePortPortalNameX)
	if exists {
		t.Fatalf("found unwanted serviceInfo for %s: %#v", serviceX, svcInfo)
	}
}

// stopProxyByName is a test helper that stops the proxy for the named service.
func stopProxyByName(proxier *Proxier, service ServicePortPortalName) error {
	info, found := proxier.getServiceInfo(service)
	if !found {
		return fmt.Errorf("unknown service: %s", service)
	}
	return proxier.stopProxy(service, info)
}

func TestTCPProxyStop(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	if !svcInfo.isAlive() {
		t.Fatalf("wrong value for isAlive(): expected true")
	}
	conn, err := net.Dial("tcp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String())))
	if err != nil {
		t.Fatalf("error connecting to proxy: %v", err)
	}
	conn.Close()
	waitForNumProxyLoops(t, p, 1)

	stopProxyByName(p, servicePortPortalName)
	if svcInfo.isAlive() {
		t.Fatalf("wrong value for isAlive(): expected false")
	}
	// Wait for the port to really close.
	if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)
}

func TestUDPProxyStop(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: udpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	conn, err := net.Dial("udp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String())))
	if err != nil {
		t.Fatalf("error connecting to proxy: %v", err)
	}
	conn.Close()
	waitForNumProxyLoops(t, p, 1)

	stopProxyByName(p, servicePortPortalName)
	// Wait for the port to really close.
	if err := waitForClosedPortUDP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)
}

func TestTCPProxyUpdateDelete(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	conn, err := net.Dial("tcp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String())))
	if err != nil {
		t.Fatalf("error connecting to proxy: %v", err)
	}
	conn.Close()
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceDelete(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
			Protocol: "TCP",
		}}},
	})
	if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)
}

func TestUDPProxyUpdateDelete(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: udpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	conn, err := net.Dial("udp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String())))
	if err != nil {
		t.Fatalf("error connecting to proxy: %v", err)
	}
	conn.Close()
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceDelete(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
			Protocol: "UDP",
		}}},
	})
	if err := waitForClosedPortUDP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)
}

func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	endpoint := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	}
	lb.OnEndpointsAdd(endpoint)

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	conn, err := net.Dial("tcp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String())))
	if err != nil {
		t.Fatalf("error connecting to proxy: %v", err)
	}
	conn.Close()
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceDelete(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
			Protocol: "TCP",
		}}},
	})
	if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	// Need to add the endpoint here because it got cleaned up during the service delete.
	lb.OnEndpointsAdd(endpoint)
	p.OnServiceAdd(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
			Protocol: "TCP",
		}}},
	})
	svcInfo, exists := p.getServiceInfo(servicePortPortalName)
	if !exists {
		t.Fatalf("can't find serviceInfo for %s", servicePortPortalName)
	}
	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 1)
}

func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	endpoint := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: udpServerPort}},
		}},
	}
	lb.OnEndpointsAdd(endpoint)

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	conn, err := net.Dial("udp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String())))
	if err != nil {
		t.Fatalf("error connecting to proxy: %v", err)
	}
	conn.Close()
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceDelete(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
			Protocol: "UDP",
		}}},
	})
	if err := waitForClosedPortUDP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	// Need to add the endpoint here because it got cleaned up during the service delete.
	lb.OnEndpointsAdd(endpoint)
	p.OnServiceAdd(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
			Protocol: "UDP",
		}}},
	})
	svcInfo, exists := p.getServiceInfo(servicePortPortalName)
	if !exists {
		t.Fatalf("can't find serviceInfo for %s", servicePortPortalName)
	}
	testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 1)
}

func TestTCPProxyUpdatePort(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceAdd(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
			Name:     "p",
			Port:     0,
			Protocol: "TCP",
		}}},
	})
	// Wait for the socket to actually get free.
	if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
		t.Fatal(err)
	}
	svcInfo, exists := p.getServiceInfo(servicePortPortalName)
	if !exists {
		t.Fatalf("can't find serviceInfo for %s", servicePortPortalName)
	}
	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	// This is a bit async, but this should be sufficient.
	time.Sleep(500 * time.Millisecond)
	waitForNumProxyLoops(t, p, 1)
}

func TestUDPProxyUpdatePort(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: udpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceAdd(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
			Name:     "p",
			Port:     0,
			Protocol: "UDP",
		}}},
	})
	// Wait for the socket to actually get free.
	if err := waitForClosedPortUDP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
		t.Fatal(err)
	}
	svcInfo, exists := p.getServiceInfo(servicePortPortalName)
	if !exists {
		t.Fatalf("can't find serviceInfo for %s", servicePortPortalName)
	}
	testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 1)
}

func TestProxyUpdatePublicIPs(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceAdd(&api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{
				Name:     "p",
				Port:     int32(svcInfo.portal.port),
				Protocol: "TCP",
			}},
			ClusterIP:   svcInfo.portal.ip,
			ExternalIPs: []string{"0.0.0.0"},
		},
	})
	// Wait for the socket to actually get free.
	if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
		t.Fatal(err)
	}
	svcInfo, exists := p.getServiceInfo(servicePortPortalName)
	if !exists {
		t.Fatalf("can't find serviceInfo for %s", servicePortPortalName)
	}
	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	// This is a bit async, but this should be sufficient.
	time.Sleep(500 * time.Millisecond)
	waitForNumProxyLoops(t, p, 1)
}

func TestProxyUpdatePortal(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	endpoint := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	}
	lb.OnEndpointsAdd(endpoint)

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 1)

	svcv0 := &api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(svcInfo.portal.port),
			Protocol: "TCP",
		}}},
	}

	svcv1 := &api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(svcInfo.portal.port),
			Protocol: "TCP",
		}}},
	}

	p.OnServiceUpdate(svcv0, svcv1)
	_, exists := p.getServiceInfo(servicePortPortalName)
	if exists {
		t.Fatalf("service with empty ClusterIP should not be included in the proxy")
	}

	svcv2 := &api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: "None", Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
			Protocol: "TCP",
		}}},
	}
	p.OnServiceUpdate(svcv1, svcv2)
	_, exists = p.getServiceInfo(servicePortPortalName)
	if exists {
		t.Fatalf("service with 'None' as ClusterIP should not be included in the proxy")
	}

	svcv3 := &api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
			Name:     "p",
			Port:     int32(svcInfo.portal.port),
			Protocol: "TCP",
		}}},
	}
	p.OnServiceUpdate(svcv2, svcv3)
	lb.OnEndpointsAdd(endpoint)
	svcInfo, exists = p.getServiceInfo(servicePortPortalName)
	if !exists {
		t.Fatalf("service with ClusterIP set not found in the proxy")
	}
	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 1)
}

// TODO(justinsb): Add test for nodePort conflict detection, once we have nodePort wired in
632
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxysocket.go
generated
vendored
Normal file
@ -0,0 +1,632 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
	"fmt"
	"io"
	"net"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/golang/glog"
	"github.com/miekg/dns"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/runtime"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/proxy"
	"k8s.io/kubernetes/pkg/util/ipconfig"
	"k8s.io/utils/exec"
)

const (
	// Kubernetes DNS suffix search list
	// TODO: Get the DNS suffix search list from docker containers.
	// The --dns-search option doesn't work on Windows containers and was
	// only recently fixed in docker.

	// Kubernetes cluster domain
	clusterDomain = "cluster.local"

	// Kubernetes service domain
	serviceDomain = "svc." + clusterDomain

	// Kubernetes default namespace domain
	namespaceServiceDomain = "default." + serviceDomain

	// Kubernetes DNS service port name
	dnsPortName = "dns"

	// DNS TYPE value A (a host address)
	dnsTypeA uint16 = 0x01

	// DNS TYPE value AAAA (a host IPv6 address)
	dnsTypeAAAA uint16 = 0x1c

	// DNS CLASS value IN (the Internet)
	dnsClassInternet uint16 = 0x01
)
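// Editor's note (not in the original source): ProxyLoop below builds its
// suffix search list as {"", namespaceServiceDomain, serviceDomain,
// clusterDomain}, so an unqualified query such as "kubernetes" is first tried
// as-is, then retried as "kubernetes.default.svc.cluster.local", then
// "kubernetes.svc.cluster.local", then "kubernetes.cluster.local".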

// Abstraction over TCP/UDP sockets which are proxied.
type proxySocket interface {
	// Addr gets the net.Addr for a proxySocket.
	Addr() net.Addr
	// Close stops the proxySocket from accepting incoming connections.
	// Each implementation should comment on the impact of calling Close
	// while sessions are active.
	Close() error
	// ProxyLoop proxies incoming connections for the specified service to the service endpoints.
	ProxyLoop(service ServicePortPortalName, info *serviceInfo, proxier *Proxier)
	// ListenPort returns the host port that the proxySocket is listening on.
	ListenPort() int
}

func newProxySocket(protocol api.Protocol, ip net.IP, port int) (proxySocket, error) {
	host := ""
	if ip != nil {
		host = ip.String()
	}

	switch strings.ToUpper(string(protocol)) {
	case "TCP":
		listener, err := net.Listen("tcp", net.JoinHostPort(host, strconv.Itoa(port)))
		if err != nil {
			return nil, err
		}
		return &tcpProxySocket{Listener: listener, port: port}, nil
	case "UDP":
		addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(host, strconv.Itoa(port)))
		if err != nil {
			return nil, err
		}
		conn, err := net.ListenUDP("udp", addr)
		if err != nil {
			return nil, err
		}
		return &udpProxySocket{UDPConn: conn, port: port}, nil
	}
	return nil, fmt.Errorf("unknown protocol %q", protocol)
}
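// exampleNewProxySocket is an editor's sketch (not in the original source)
// showing typical construction of a proxySocket. Everything it references is
// already imported or defined in this file; only this wrapper function itself
// is hypothetical. Port 0 asks the kernel for an ephemeral port, which Addr()
// then reports.
func exampleNewProxySocket() error {
	sock, err := newProxySocket(api.ProtocolTCP, net.ParseIP("127.0.0.1"), 0)
	if err != nil {
		return err
	}
	defer sock.Close()
	glog.Infof("proxy socket listening on %s", sock.Addr())
	return nil
}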

// Per-attempt timeouts for connecting to a backend.
var endpointDialTimeout = []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 2 * time.Second}
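// Editor's note (not in the original source): tryConnect walks this slice in
// order, so a completely unreachable backend costs at most
// 250ms + 500ms + 1s + 2s = 3.75s of dial time before the connection attempt
// is abandoned (plus whatever the load-balancer lookups themselves take).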

// tcpProxySocket implements proxySocket. Close() is implemented by net.Listener. When Close() is called,
// no new connections are allowed but existing connections are left untouched.
type tcpProxySocket struct {
	net.Listener
	port int
}

func (tcp *tcpProxySocket) ListenPort() int {
	return tcp.port
}

func tryConnect(service ServicePortPortalName, srcAddr net.Addr, protocol string, proxier *Proxier) (out net.Conn, err error) {
	sessionAffinityReset := false
	for _, dialTimeout := range endpointDialTimeout {
		servicePortName := proxy.ServicePortName{
			NamespacedName: types.NamespacedName{
				Namespace: service.Namespace,
				Name:      service.Name,
			},
			Port: service.Port,
		}
		endpoint, err := proxier.loadBalancer.NextEndpoint(servicePortName, srcAddr, sessionAffinityReset)
		if err != nil {
			glog.Errorf("Couldn't find an endpoint for %s: %v", service, err)
			return nil, err
		}
		glog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint)
		// TODO: This could spin up a new goroutine to make the outbound connection,
		// and keep accepting inbound traffic.
		outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout)
		if err != nil {
			if isTooManyFDsError(err) {
				panic("Dial failed: " + err.Error())
			}
			glog.Errorf("Dial failed: %v", err)
			sessionAffinityReset = true
			continue
		}
		return outConn, nil
	}
	return nil, fmt.Errorf("failed to connect to an endpoint")
}
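// Editor's note (not in the original source): on each failed dial, tryConnect
// sets sessionAffinityReset before asking the load balancer for the next
// endpoint, so a client pinned to a dead endpoint by session affinity is
// re-balanced to a live one rather than retried against the same backend.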

func (tcp *tcpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serviceInfo, proxier *Proxier) {
	for {
		if !myInfo.isAlive() {
			// The service port was closed or replaced.
			return
		}
		// Block until a connection is made.
		inConn, err := tcp.Accept()
		if err != nil {
			if isTooManyFDsError(err) {
				panic("Accept failed: " + err.Error())
			}

			if isClosedError(err) {
				return
			}
			if !myInfo.isAlive() {
				// The service port was just closed, so the accept failure is expected.
				return
			}
			glog.Errorf("Accept failed: %v", err)
			continue
		}
		glog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
		outConn, err := tryConnect(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", proxier)
		if err != nil {
			glog.Errorf("Failed to connect to balancer: %v", err)
			inConn.Close()
			continue
		}
		// Spin up an async copy loop.
		go proxyTCP(inConn.(*net.TCPConn), outConn.(*net.TCPConn))
	}
}

// proxyTCP proxies data bi-directionally between in and out.
func proxyTCP(in, out *net.TCPConn) {
	var wg sync.WaitGroup
	wg.Add(2)
	glog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
		in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
	go copyBytes("from backend", in, out, &wg)
	go copyBytes("to backend", out, in, &wg)
	wg.Wait()
}

func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) {
	defer wg.Done()
	glog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
	n, err := io.Copy(dest, src)
	if err != nil {
		if !isClosedError(err) {
			glog.Errorf("I/O error: %v", err)
		}
	}
	glog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
	dest.Close()
	src.Close()
}

// udpProxySocket implements proxySocket. Close() is implemented by net.UDPConn. When Close() is called,
// no new connections are allowed and existing connections are broken.
// TODO: We could lame-duck this ourselves, if it becomes important.
type udpProxySocket struct {
	*net.UDPConn
	port int
}

func (udp *udpProxySocket) ListenPort() int {
	return udp.port
}

func (udp *udpProxySocket) Addr() net.Addr {
	return udp.LocalAddr()
}

// clientCache holds all the known UDP clients that have not timed out.
type clientCache struct {
	mu      sync.Mutex
	clients map[string]net.Conn // addr string -> connection
}

func newClientCache() *clientCache {
	return &clientCache{clients: map[string]net.Conn{}}
}

// dnsClientQuery classifies a DNS query client by address and QTYPE.
type dnsClientQuery struct {
	clientAddress string
	dnsQType      uint16
}

// dnsClientCache holds, for each client and QTYPE, the current index into the
// DNS suffix search list and the original DNS message.
type dnsClientCache struct {
	mu      sync.Mutex
	clients map[dnsClientQuery]*dnsQueryState
}

type dnsQueryState struct {
	searchIndex int32
	msg         *dns.Msg
}

func newDNSClientCache() *dnsClientCache {
	return &dnsClientCache{clients: map[dnsClientQuery]*dnsQueryState{}}
}

func packetRequiresDNSSuffix(dnsType, dnsClass uint16) bool {
	return (dnsType == dnsTypeA || dnsType == dnsTypeAAAA) && dnsClass == dnsClassInternet
}

func isDNSService(portName string) bool {
	return portName == dnsPortName
}

func appendDNSSuffix(msg *dns.Msg, buffer []byte, length int, dnsSuffix string) (int, error) {
	if msg == nil || len(msg.Question) == 0 {
		return length, fmt.Errorf("DNS message parameter is invalid")
	}

	// Save the original name since it will be reused for the next iteration.
	origName := msg.Question[0].Name
	if dnsSuffix != "" {
		msg.Question[0].Name += dnsSuffix + "."
	}
	mbuf, err := msg.PackBuffer(buffer)
	msg.Question[0].Name = origName

	if err != nil {
		glog.Warningf("Unable to pack DNS packet: %v", err)
		return length, err
	}

	if &buffer[0] != &mbuf[0] {
		return length, fmt.Errorf("buffer is too small in packing DNS packet")
	}

	return len(mbuf), nil
}
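// exampleAppendDNSSuffix is an editor's sketch (not in the original source)
// showing how appendDNSSuffix is meant to be driven. Everything it references
// exists in this file or in the imported miekg/dns package; only this example
// function itself is hypothetical.
func exampleAppendDNSSuffix() {
	// Build a query for the unqualified name "kubernetes".
	msg := &dns.Msg{}
	msg.SetQuestion("kubernetes.", dnsTypeA)
	packed, err := msg.Pack()
	if err != nil {
		glog.Errorf("pack failed: %v", err)
		return
	}
	buffer := make([]byte, 4096)
	n := copy(buffer, packed)
	// This rewrites the question to "kubernetes.default.svc.cluster.local."
	// and returns the new packed length; the in-memory msg is restored after.
	if n, err = appendDNSSuffix(msg, buffer, n, namespaceServiceDomain); err != nil {
		glog.Errorf("append failed: %v", err)
		return
	}
	glog.Infof("rewritten query is %d bytes", n)
}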
|
||||
|
||||
func recoverDNSQuestion(origName string, msg *dns.Msg, buffer []byte, length int) (int, error) {
|
||||
if msg == nil || len(msg.Question) == 0 {
|
||||
return length, fmt.Errorf("DNS message parameter is invalid")
|
||||
}
|
||||
|
||||
if origName == msg.Question[0].Name {
|
||||
return length, nil
|
||||
}
|
||||
|
||||
msg.Question[0].Name = origName
|
||||
if len(msg.Answer) > 0 {
|
||||
msg.Answer[0].Header().Name = origName
|
||||
}
|
||||
mbuf, err := msg.PackBuffer(buffer)
|
||||
|
||||
if err != nil {
|
||||
glog.Warningf("Unable to pack DNS packet. Error is: %v", err)
|
||||
return length, err
|
||||
}
|
||||
|
||||
if &buffer[0] != &mbuf[0] {
|
||||
return length, fmt.Errorf("Buffer is too small in packing DNS packet")
|
||||
}
|
||||
|
||||
return len(mbuf), nil
|
||||
}
|
||||
|
||||
func processUnpackedDNSQueryPacket(
|
||||
dnsClients *dnsClientCache,
|
||||
msg *dns.Msg,
|
||||
host string,
|
||||
dnsQType uint16,
|
||||
buffer []byte,
|
||||
length int,
|
||||
dnsSearch []string) int {
|
||||
if dnsSearch == nil || len(dnsSearch) == 0 {
|
||||
glog.V(1).Infof("DNS search list is not initialized and is empty.")
|
||||
return length
|
||||
}
|
||||
|
||||
// TODO: handle concurrent queries from a client
|
||||
dnsClients.mu.Lock()
|
||||
state, found := dnsClients.clients[dnsClientQuery{host, dnsQType}]
|
||||
if !found {
|
||||
state = &dnsQueryState{0, msg}
|
||||
dnsClients.clients[dnsClientQuery{host, dnsQType}] = state
|
||||
}
|
||||
dnsClients.mu.Unlock()
|
||||
|
||||
index := atomic.SwapInt32(&state.searchIndex, state.searchIndex+1)
|
||||
// Also update message ID if the client retries due to previous query time out
|
||||
state.msg.MsgHdr.Id = msg.MsgHdr.Id
|
||||
|
||||
if index < 0 || index >= int32(len(dnsSearch)) {
|
||||
glog.V(1).Infof("Search index %d is out of range.", index)
|
||||
return length
|
||||
}
|
||||
|
||||
length, err := appendDNSSuffix(msg, buffer, length, dnsSearch[index])
|
||||
if err != nil {
|
||||
glog.Errorf("Append DNS suffix failed: %v", err)
|
||||
}
|
||||
|
||||
return length
|
||||
}
|
||||
|
||||
func processUnpackedDNSResponsePacket(
|
||||
svrConn net.Conn,
|
||||
dnsClients *dnsClientCache,
|
||||
msg *dns.Msg,
|
||||
rcode int,
|
||||
host string,
|
||||
dnsQType uint16,
|
||||
buffer []byte,
|
||||
length int,
|
||||
dnsSearch []string) (bool, int) {
|
||||
var drop bool
|
||||
var err error
|
||||
if dnsSearch == nil || len(dnsSearch) == 0 {
|
||||
glog.V(1).Infof("DNS search list is not initialized and is empty.")
|
||||
return drop, length
|
||||
}
|
||||
|
||||
dnsClients.mu.Lock()
|
||||
state, found := dnsClients.clients[dnsClientQuery{host, dnsQType}]
|
||||
dnsClients.mu.Unlock()
|
||||
|
||||
if found {
|
||||
index := atomic.SwapInt32(&state.searchIndex, state.searchIndex+1)
|
||||
if rcode != 0 && index >= 0 && index < int32(len(dnsSearch)) {
|
||||
// If the reponse has failure and iteration through the search list has not
|
||||
// reached the end, retry on behalf of the client using the original query message
|
||||
drop = true
|
||||
length, err = appendDNSSuffix(state.msg, buffer, length, dnsSearch[index])
|
||||
if err != nil {
|
||||
glog.Errorf("Append DNS suffix failed: %v", err)
|
||||
}
|
||||
|
||||
_, err = svrConn.Write(buffer[0:length])
|
||||
if err != nil {
|
||||
if !logTimeout(err) {
|
||||
glog.Errorf("Write failed: %v", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
length, err = recoverDNSQuestion(state.msg.Question[0].Name, msg, buffer, length)
|
||||
if err != nil {
|
||||
glog.Errorf("Recover DNS question failed: %v", err)
|
||||
}
|
||||
|
||||
dnsClients.mu.Lock()
|
||||
delete(dnsClients.clients, dnsClientQuery{host, dnsQType})
|
||||
dnsClients.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
return drop, length
|
||||
}
|
||||
|
||||
func processDNSQueryPacket(
|
||||
dnsClients *dnsClientCache,
|
||||
cliAddr net.Addr,
|
||||
buffer []byte,
|
||||
length int,
|
||||
dnsSearch []string) (int, error) {
|
||||
msg := &dns.Msg{}
|
||||
if err := msg.Unpack(buffer[:length]); err != nil {
|
||||
glog.Warningf("Unable to unpack DNS packet. Error is: %v", err)
|
||||
return length, err
|
||||
}
|
||||
|
||||
// Query - Response bit that specifies whether this message is a query (0) or a response (1).
|
||||
if msg.MsgHdr.Response == true {
|
||||
return length, fmt.Errorf("DNS packet should be a query message")
|
||||
}
|
||||
|
||||
// QDCOUNT
|
||||
if len(msg.Question) != 1 {
|
||||
glog.V(1).Infof("Number of entries in the question section of the DNS packet is: %d", len(msg.Question))
|
||||
glog.V(1).Infof("DNS suffix appending does not support more than one question.")
|
||||
return length, nil
|
||||
}
|
||||
|
||||
// ANCOUNT, NSCOUNT, ARCOUNT
|
||||
if len(msg.Answer) != 0 || len(msg.Ns) != 0 || len(msg.Extra) != 0 {
|
||||
glog.V(1).Infof("DNS packet contains more than question section.")
|
||||
return length, nil
|
||||
}
|
||||
|
||||
dnsQType := msg.Question[0].Qtype
|
||||
dnsQClass := msg.Question[0].Qclass
|
||||
if packetRequiresDNSSuffix(dnsQType, dnsQClass) {
|
||||
host, _, err := net.SplitHostPort(cliAddr.String())
|
||||
if err != nil {
|
||||
glog.V(1).Infof("Failed to get host from client address: %v", err)
|
||||
host = cliAddr.String()
|
||||
}
|
||||
|
||||
length = processUnpackedDNSQueryPacket(dnsClients, msg, host, dnsQType, buffer, length, dnsSearch)
|
||||
}
|
||||
|
||||
return length, nil
|
||||
}
|
||||
|
||||
func processDNSResponsePacket(
|
||||
svrConn net.Conn,
|
||||
dnsClients *dnsClientCache,
|
||||
cliAddr net.Addr,
|
||||
buffer []byte,
|
||||
length int,
|
||||
dnsSearch []string) (bool, int, error) {
|
||||
var drop bool
|
||||
msg := &dns.Msg{}
|
||||
if err := msg.Unpack(buffer[:length]); err != nil {
|
||||
glog.Warningf("Unable to unpack DNS packet. Error is: %v", err)
|
||||
return drop, length, err
|
||||
}
|
||||
|
||||
// Query - Response bit that specifies whether this message is a query (0) or a response (1).
|
||||
if msg.MsgHdr.Response == false {
|
||||
return drop, length, fmt.Errorf("DNS packet should be a response message")
|
||||
}
|
||||
|
||||
// QDCOUNT
|
||||
if len(msg.Question) != 1 {
|
||||
glog.V(1).Infof("Number of entries in the reponse section of the DNS packet is: %d", len(msg.Answer))
|
||||
return drop, length, nil
|
||||
}
|
||||
|
||||
dnsQType := msg.Question[0].Qtype
|
||||
dnsQClass := msg.Question[0].Qclass
|
||||
if packetRequiresDNSSuffix(dnsQType, dnsQClass) {
|
||||
host, _, err := net.SplitHostPort(cliAddr.String())
|
||||
if err != nil {
|
||||
glog.V(1).Infof("Failed to get host from client address: %v", err)
|
||||
host = cliAddr.String()
|
||||
}
|
||||
|
||||
drop, length = processUnpackedDNSResponsePacket(svrConn, dnsClients, msg, msg.MsgHdr.Rcode, host, dnsQType, buffer, length, dnsSearch)
|
||||
}
|
||||
|
||||
return drop, length, nil
|
||||
}
|
||||

func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serviceInfo, proxier *Proxier) {
	var buffer [4096]byte // 4KiB should be enough for most whole-packets
	var dnsSearch []string
	if isDNSService(service.Port) {
		dnsSearch = []string{"", namespaceServiceDomain, serviceDomain, clusterDomain}
		execer := exec.New()
		ipconfigInterface := ipconfig.New(execer)
		suffixList, err := ipconfigInterface.GetDnsSuffixSearchList()
		if err == nil {
			for _, suffix := range suffixList {
				dnsSearch = append(dnsSearch, suffix)
			}
		}
	}

	for {
		if !myInfo.isAlive() {
			// The service port was closed or replaced.
			break
		}

		// Block until data arrives.
		// TODO: Accumulate a histogram of n or something, to fine tune the buffer size.
		n, cliAddr, err := udp.ReadFrom(buffer[0:])
		if err != nil {
			if e, ok := err.(net.Error); ok {
				if e.Temporary() {
					glog.V(1).Infof("ReadFrom had a temporary failure: %v", err)
					continue
				}
			}
			glog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err)
			break
		}

		// If this is a DNS query packet
		if isDNSService(service.Port) {
			n, err = processDNSQueryPacket(myInfo.dnsClients, cliAddr, buffer[:], n, dnsSearch)
			if err != nil {
				glog.Errorf("Process DNS query packet failed: %v", err)
			}
		}

		// If this is a client we know already, reuse the connection and goroutine.
		svrConn, err := udp.getBackendConn(myInfo.activeClients, myInfo.dnsClients, cliAddr, proxier, service, myInfo.timeout, dnsSearch)
		if err != nil {
			continue
		}
		// TODO: It would be nice to let the goroutine handle this write, but we don't
		// really want to copy the buffer. We could do a pool of buffers or something.
		_, err = svrConn.Write(buffer[0:n])
		if err != nil {
			if !logTimeout(err) {
				glog.Errorf("Write failed: %v", err)
				// TODO: Maybe tear down the goroutine for this client/server pair?
			}
			continue
		}
		err = svrConn.SetDeadline(time.Now().Add(myInfo.timeout))
		if err != nil {
			glog.Errorf("SetDeadline failed: %v", err)
			continue
		}
	}
}

func (udp *udpProxySocket) getBackendConn(activeClients *clientCache, dnsClients *dnsClientCache, cliAddr net.Addr, proxier *Proxier, service ServicePortPortalName, timeout time.Duration, dnsSearch []string) (net.Conn, error) {
	activeClients.mu.Lock()
	defer activeClients.mu.Unlock()

	svrConn, found := activeClients.clients[cliAddr.String()]
	if !found {
		// TODO: This could spin up a new goroutine to make the outbound connection,
		// and keep accepting inbound traffic.
		glog.V(3).Infof("New UDP connection from %s", cliAddr)
		var err error
		svrConn, err = tryConnect(service, cliAddr, "udp", proxier)
		if err != nil {
			return nil, err
		}
		if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil {
			glog.Errorf("SetDeadline failed: %v", err)
			return nil, err
		}
		activeClients.clients[cliAddr.String()] = svrConn
		go func(cliAddr net.Addr, svrConn net.Conn, activeClients *clientCache, dnsClients *dnsClientCache, service ServicePortPortalName, timeout time.Duration, dnsSearch []string) {
			defer runtime.HandleCrash()
			udp.proxyClient(cliAddr, svrConn, activeClients, dnsClients, service, timeout, dnsSearch)
		}(cliAddr, svrConn, activeClients, dnsClients, service, timeout, dnsSearch)
	}
	return svrConn, nil
}

// This function is expected to be called as a goroutine.
// TODO: Track and log bytes copied, like TCP
func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activeClients *clientCache, dnsClients *dnsClientCache, service ServicePortPortalName, timeout time.Duration, dnsSearch []string) {
	defer svrConn.Close()
	var buffer [4096]byte
	for {
		n, err := svrConn.Read(buffer[0:])
		if err != nil {
			if !logTimeout(err) {
				glog.Errorf("Read failed: %v", err)
			}
			break
		}

		drop := false
		if isDNSService(service.Port) {
			drop, n, err = processDNSResponsePacket(svrConn, dnsClients, cliAddr, buffer[:], n, dnsSearch)
			if err != nil {
				glog.Errorf("Process DNS response packet failed: %v", err)
			}
		}

		if !drop {
			err = svrConn.SetDeadline(time.Now().Add(timeout))
			if err != nil {
				glog.Errorf("SetDeadline failed: %v", err)
				break
			}
			n, err = udp.WriteTo(buffer[0:n], cliAddr)
			if err != nil {
				if !logTimeout(err) {
					glog.Errorf("WriteTo failed: %v", err)
				}
				break
			}
		}
	}
	activeClients.mu.Lock()
	delete(activeClients.clients, cliAddr.String())
	activeClients.mu.Unlock()
}
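
The getBackendConn/proxyClient pair above amounts to a per-client connection cache: one backend UDP connection per client source address, plus a reader goroutine that removes the cache entry when the backend side goes quiet. A minimal sketch of the pattern follows, with hypothetical names and the backend-to-client copy loop elided; it assumes only the net and sync packages and is not part of the original file:

type connCache struct {
	mu      sync.Mutex
	clients map[string]net.Conn
}

func (c *connCache) get(cliAddr net.Addr, dial func() (net.Conn, error)) (net.Conn, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if conn, ok := c.clients[cliAddr.String()]; ok {
		return conn, nil // a client we know already: reuse its backend connection
	}
	conn, err := dial()
	if err != nil {
		return nil, err
	}
	c.clients[cliAddr.String()] = conn
	go func() {
		// ... copy backend->client here, then drop the cache entry on exit:
		c.mu.Lock()
		delete(c.clients, cliAddr.String())
		c.mu.Unlock()
	}()
	return conn, nil
}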
374
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/roundrobin.go
generated
vendored
Normal file
@ -0,0 +1,374 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
	"errors"
	"fmt"
	"net"
	"reflect"
	"strconv"
	"sync"
	"time"

	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/types"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/proxy"
	"k8s.io/kubernetes/pkg/util/slice"
)

var (
	ErrMissingServiceEntry = errors.New("missing service entry")
	ErrMissingEndpoints    = errors.New("missing endpoints")
)

type affinityState struct {
	clientIP string
	//clientProtocol api.Protocol //not yet used
	//sessionCookie  string       //not yet used
	endpoint string
	lastUsed time.Time
}

type affinityPolicy struct {
	affinityType api.ServiceAffinity
	affinityMap  map[string]*affinityState // map client IP -> affinity info
	ttlSeconds   int
}

// LoadBalancerRR is a round-robin load balancer.
type LoadBalancerRR struct {
	lock     sync.RWMutex
	services map[proxy.ServicePortName]*balancerState
}

// Ensure this implements LoadBalancer.
var _ LoadBalancer = &LoadBalancerRR{}

type balancerState struct {
	endpoints []string // a list of "ip:port" style strings
	index     int      // current index into endpoints
	affinity  affinityPolicy
}

func newAffinityPolicy(affinityType api.ServiceAffinity, ttlSeconds int) *affinityPolicy {
	return &affinityPolicy{
		affinityType: affinityType,
		affinityMap:  make(map[string]*affinityState),
		ttlSeconds:   ttlSeconds,
	}
}

// NewLoadBalancerRR returns a new LoadBalancerRR.
func NewLoadBalancerRR() *LoadBalancerRR {
	return &LoadBalancerRR{
		services: map[proxy.ServicePortName]*balancerState{},
	}
}

func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlSeconds int) error {
	glog.V(4).Infof("LoadBalancerRR NewService %q", svcPort)
	lb.lock.Lock()
	defer lb.lock.Unlock()
	lb.newServiceInternal(svcPort, affinityType, ttlSeconds)
	return nil
}

// This assumes that lb.lock is already held.
func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlSeconds int) *balancerState {
	if ttlSeconds == 0 {
		ttlSeconds = int(api.DefaultClientIPServiceAffinitySeconds) // default to 3 hours if not specified. Should 0 be unlimited instead?
	}

	if _, exists := lb.services[svcPort]; !exists {
		lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlSeconds)}
		glog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort)
	} else if affinityType != "" {
		lb.services[svcPort].affinity.affinityType = affinityType
	}
	return lb.services[svcPort]
}

func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
	glog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort)
	lb.lock.Lock()
	defer lb.lock.Unlock()
	delete(lb.services, svcPort)
}

// isSessionAffinity returns true if this service is using some form of session affinity.
func isSessionAffinity(affinity *affinityPolicy) bool {
	// Should never be empty string, but checking for it to be safe.
	if affinity.affinityType == "" || affinity.affinityType == api.ServiceAffinityNone {
		return false
	}
	return true
}

// NextEndpoint returns a service endpoint.
// The service endpoint is chosen using the round-robin algorithm.
func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error) {
	// Coarse locking is simple. We can get more fine-grained if/when we
	// can prove it matters.
	lb.lock.Lock()
	defer lb.lock.Unlock()

	state, exists := lb.services[svcPort]
	if !exists || state == nil {
		return "", ErrMissingServiceEntry
	}
	if len(state.endpoints) == 0 {
		return "", ErrMissingEndpoints
	}
	glog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints)

	sessionAffinityEnabled := isSessionAffinity(&state.affinity)

	var ipaddr string
	if sessionAffinityEnabled {
		// Caution: don't shadow ipaddr
		var err error
		ipaddr, _, err = net.SplitHostPort(srcAddr.String())
		if err != nil {
			return "", fmt.Errorf("malformed source address %q: %v", srcAddr.String(), err)
		}
		if !sessionAffinityReset {
			sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
			if exists && int(time.Now().Sub(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
				// Affinity wins.
				endpoint := sessionAffinity.endpoint
				sessionAffinity.lastUsed = time.Now()
				glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint)
				return endpoint, nil
			}
		}
	}
	// Take the next endpoint.
	endpoint := state.endpoints[state.index]
	state.index = (state.index + 1) % len(state.endpoints)

	if sessionAffinityEnabled {
		affinity := state.affinity.affinityMap[ipaddr]
		if affinity == nil {
			affinity = new(affinityState) //&affinityState{ipaddr, "TCP", "", endpoint, time.Now()}
			state.affinity.affinityMap[ipaddr] = affinity
		}
		affinity.lastUsed = time.Now()
		affinity.endpoint = endpoint
		affinity.clientIP = ipaddr
		glog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr])
	}

	return endpoint, nil
}
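
// nextEndpointSketch isolates the rotation NextEndpoint falls back to when no
// affinity record wins: a cursor over the endpoint list that wraps around with
// modulo arithmetic. It is an illustrative helper, not part of the original file.
func nextEndpointSketch(endpoints []string, index *int) string {
	endpoint := endpoints[*index]
	*index = (*index + 1) % len(endpoints)
	return endpoint
}

// With endpoints ["a:1", "b:2", "c:3"] and *index starting at 0, successive
// calls yield a:1, b:2, c:3, a:1, ...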

type hostPortPair struct {
	host string
	port int
}

func isValidEndpoint(hpp *hostPortPair) bool {
	return hpp.host != "" && hpp.port > 0
}

func flattenValidEndpoints(endpoints []hostPortPair) []string {
	// Convert Endpoint objects into strings for easier use later. Ignore
	// the protocol field - we'll get that from the Service objects.
	var result []string
	for i := range endpoints {
		hpp := &endpoints[i]
		if isValidEndpoint(hpp) {
			result = append(result, net.JoinHostPort(hpp.host, strconv.Itoa(hpp.port)))
		}
	}
	return result
}

// removeSessionAffinityByEndpoint removes any session affinity records
// associated with a particular endpoint (for example when a pod goes down).
func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) {
	for _, affinity := range state.affinity.affinityMap {
		if affinity.endpoint == endpoint {
			glog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort)
			delete(state.affinity.affinityMap, affinity.clientIP)
		}
	}
}

// updateAffinityMap loops through the valid endpoints and the endpoints already
// associated with the load balancer, then removes any session affinity records
// that are not in both lists. This assumes that lb.lock is already held.
func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEndpoints []string) {
	allEndpoints := map[string]int{}
	for _, newEndpoint := range newEndpoints {
		allEndpoints[newEndpoint] = 1
	}
	state, exists := lb.services[svcPort]
	if !exists {
		return
	}
	for _, existingEndpoint := range state.endpoints {
		allEndpoints[existingEndpoint] = allEndpoints[existingEndpoint] + 1
	}
	for mKey, mVal := range allEndpoints {
		if mVal == 1 {
			glog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort)
			removeSessionAffinityByEndpoint(state, svcPort, mKey)
		}
	}
}
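
// symmetricDifferenceSketch shows the counting trick updateAffinityMap relies
// on: seed every new endpoint with a count of 1, add 1 for every endpoint
// already known, and anything left at 1 sits in exactly one of the two lists.
// For brand-new endpoints the subsequent affinity removal is a no-op, so in
// practice only departed endpoints lose their records. Illustrative only, not
// part of the original file.
func symmetricDifferenceSketch(oldEndpoints, newEndpoints []string) []string {
	counts := map[string]int{}
	for _, e := range newEndpoints {
		counts[e] = 1
	}
	for _, e := range oldEndpoints {
		counts[e]++
	}
	var onlyInOne []string
	for e, c := range counts {
		if c == 1 {
			onlyInOne = append(onlyInOne, e)
		}
	}
	return onlyInOne
}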

// buildPortsToEndpointsMap builds a map of portname -> all ip:ports for that
// portname. Explode Endpoints.Subsets[*] into this structure.
func buildPortsToEndpointsMap(endpoints *api.Endpoints) map[string][]hostPortPair {
	portsToEndpoints := map[string][]hostPortPair{}
	for i := range endpoints.Subsets {
		ss := &endpoints.Subsets[i]
		for i := range ss.Ports {
			port := &ss.Ports[i]
			for i := range ss.Addresses {
				addr := &ss.Addresses[i]
				portsToEndpoints[port.Name] = append(portsToEndpoints[port.Name], hostPortPair{addr.IP, int(port.Port)})
				// Ignore the protocol field - we'll get that from the Service objects.
			}
		}
	}
	return portsToEndpoints
}
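
// buildPortsToEndpointsMapExample demonstrates the explosion with made-up
// values: within a subset every address pairs with every named port, so the
// result maps "p" to 10.0.0.1:80 and 10.0.0.2:80, and "q" to the :443 pairs.
// Illustrative only, not part of the original file.
func buildPortsToEndpointsMapExample() map[string][]hostPortPair {
	return buildPortsToEndpointsMap(&api.Endpoints{
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "10.0.0.1"}, {IP: "10.0.0.2"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: 80}, {Name: "q", Port: 443}},
		}},
	})
}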

func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *api.Endpoints) {
	portsToEndpoints := buildPortsToEndpointsMap(endpoints)

	lb.lock.Lock()
	defer lb.lock.Unlock()

	for portname := range portsToEndpoints {
		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
		newEndpoints := flattenValidEndpoints(portsToEndpoints[portname])
		state, exists := lb.services[svcPort]

		if !exists || state == nil || len(newEndpoints) > 0 {
			glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
			lb.updateAffinityMap(svcPort, newEndpoints)
			// OnEndpointsAdd can be called without NewService being called externally.
			// To be safe we will call it here. A new service will only be created
			// if one does not already exist. The affinity will be updated
			// later, once NewService is called.
			state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
			state.endpoints = slice.ShuffleStrings(newEndpoints)

			// Reset the round-robin index.
			state.index = 0
		}
	}
}

func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
	portsToEndpoints := buildPortsToEndpointsMap(endpoints)
	oldPortsToEndpoints := buildPortsToEndpointsMap(oldEndpoints)
	registeredEndpoints := make(map[proxy.ServicePortName]bool)

	lb.lock.Lock()
	defer lb.lock.Unlock()

	for portname := range portsToEndpoints {
		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
		newEndpoints := flattenValidEndpoints(portsToEndpoints[portname])
		state, exists := lb.services[svcPort]

		curEndpoints := []string{}
		if state != nil {
			curEndpoints = state.endpoints
		}

		if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints) {
			glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
			lb.updateAffinityMap(svcPort, newEndpoints)
			// OnEndpointsUpdate can be called without NewService being called externally.
			// To be safe we will call it here. A new service will only be created
			// if one does not already exist. The affinity will be updated
			// later, once NewService is called.
			state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
			state.endpoints = slice.ShuffleStrings(newEndpoints)

			// Reset the round-robin index.
			state.index = 0
		}
		registeredEndpoints[svcPort] = true
	}

	for portname := range oldPortsToEndpoints {
		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
		if _, exists := registeredEndpoints[svcPort]; !exists {
			glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
			// If the service is still around, reset but don't delete.
			if state, ok := lb.services[svcPort]; ok {
				state.endpoints = []string{}
				state.index = 0
				state.affinity.affinityMap = map[string]*affinityState{}
			}
		}
	}
}

func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *api.Endpoints) {
	portsToEndpoints := buildPortsToEndpointsMap(endpoints)

	lb.lock.Lock()
	defer lb.lock.Unlock()

	for portname := range portsToEndpoints {
		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
		glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
		// If the service is still around, reset but don't delete.
		if state, ok := lb.services[svcPort]; ok {
			state.endpoints = []string{}
			state.index = 0
			state.affinity.affinityMap = map[string]*affinityState{}
		}
	}
}

func (lb *LoadBalancerRR) OnEndpointsSynced() {
}

// slicesEquiv tests whether two slices are equivalent. This sorts both slices in-place.
func slicesEquiv(lhs, rhs []string) bool {
	if len(lhs) != len(rhs) {
		return false
	}
	if reflect.DeepEqual(slice.SortStrings(lhs), slice.SortStrings(rhs)) {
		return true
	}
	return false
}

func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortName) {
	lb.lock.Lock()
	defer lb.lock.Unlock()

	state, exists := lb.services[svcPort]
	if !exists {
		return
	}
	for ip, affinity := range state.affinity.affinityMap {
		if int(time.Now().Sub(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
			glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
			delete(state.affinity.affinityMap, ip)
		}
	}
}
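
Taken together, a caller drives this balancer as: register the service, feed it endpoints, pick an endpoint per packet, and periodically expire sticky sessions. A hedged sketch of that lifecycle follows (the namespace, service name, IPs and TTL are made-up values; the imports match the test file below, which exercises exactly these flows):

	lb := NewLoadBalancerRR()
	svc := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.NewService(svc, api.ServiceAffinityClientIP, 10) // 10s sticky TTL
	lb.OnEndpointsAdd(&api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: "echo", Namespace: "testnamespace"},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "10.0.0.1"}, {IP: "10.0.0.2"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: 80}},
		}},
	})
	client := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	first, _ := lb.NextEndpoint(svc, client, false)  // round-robin pick
	second, _ := lb.NextEndpoint(svc, client, false) // same client within the TTL: same endpoint
	_, _ = first, second
	lb.CleanupStaleStickySessions(svc) // drops records idle longer than the TTL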
717
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/roundrobin_test.go
generated
vendored
Normal file
@ -0,0 +1,717 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
	"net"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/proxy"
)

func TestValidateWorks(t *testing.T) {
	if isValidEndpoint(&hostPortPair{}) {
		t.Errorf("Didn't fail for empty set")
	}
	if isValidEndpoint(&hostPortPair{host: "foobar"}) {
		t.Errorf("Didn't fail with invalid port")
	}
	if isValidEndpoint(&hostPortPair{host: "foobar", port: -1}) {
		t.Errorf("Didn't fail with a negative port")
	}
	if !isValidEndpoint(&hostPortPair{host: "foobar", port: 8080}) {
		t.Errorf("Failed a valid config.")
	}
}

func TestFilterWorks(t *testing.T) {
	endpoints := []hostPortPair{
		{host: "foobar", port: 1},
		{host: "foobar", port: 2},
		{host: "foobar", port: -1},
		{host: "foobar", port: 3},
		{host: "foobar", port: -2},
	}
	filtered := flattenValidEndpoints(endpoints)

	if len(filtered) != 3 {
		t.Errorf("Failed to filter to the correct size")
	}
	if filtered[0] != "foobar:1" {
		t.Errorf("Index zero is not foobar:1")
	}
	if filtered[1] != "foobar:2" {
		t.Errorf("Index one is not foobar:2")
	}
	if filtered[2] != "foobar:3" {
		t.Errorf("Index two is not foobar:3")
	}
}

func TestLoadBalanceFailsWithNoEndpoints(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "does-not-exist"}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil {
		t.Errorf("Didn't fail with non-existent service")
	}
	if len(endpoint) != 0 {
		t.Errorf("Got an endpoint")
	}
}

func expectEndpoint(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
	endpoint, err := loadBalancer.NextEndpoint(service, netaddr, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
	}
	if endpoint != expected {
		t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
	}
}

func expectEndpointWithSessionAffinityReset(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
	endpoint, err := loadBalancer.NextEndpoint(service, netaddr, true)
	if err != nil {
		t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
	}
	if endpoint != expected {
		t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
	}
}

func TestLoadBalanceWorksWithSingleEndpoint(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpoints := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: 40}},
		}},
	}
	loadBalancer.OnEndpointsAdd(endpoints)
	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
}

func stringsInSlice(haystack []string, needles ...string) bool {
	for _, needle := range needles {
		found := false
		for i := range haystack {
			if haystack[i] == needle {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}

func TestLoadBalanceWorksWithMultipleEndpoints(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpoints := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "endpoint"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}},
		}},
	}
	loadBalancer.OnEndpointsAdd(endpoints)

	shuffledEndpoints := loadBalancer.services[service].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint:1", "endpoint:2", "endpoint:3") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil)
}

func TestLoadBalanceWorksWithMultipleEndpointsMultiplePorts(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
	endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpoints := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}},
			},
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints)

	shuffledEndpoints := loadBalancer.services[serviceP].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:1", "endpoint3:3") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)

	shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint1:2", "endpoint2:2", "endpoint3:4") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
}

func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
	endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpointsv1 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}},
			},
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint2"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}},
			},
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpointsv1)

	shuffledEndpoints := loadBalancer.services[serviceP].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:2", "endpoint3:3") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)

	shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint1:10", "endpoint2:20", "endpoint3:30") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)

	// Then update the configuration with one fewer endpoints, and make sure
	// we start at the beginning again
	endpointsv2 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint4"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}},
			},
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint5"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}},
			},
		},
	}
	loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)

	shuffledEndpoints = loadBalancer.services[serviceP].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint4:4", "endpoint5:5") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)

	shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint4:40", "endpoint5:50") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)

	// Clear endpoints
	endpointsv3 := &api.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil}
	loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)

	endpoint, err = loadBalancer.NextEndpoint(serviceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
}

func TestLoadBalanceWorksWithServiceRemoval(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	fooServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	barServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: "p"}
	endpoint, err := loadBalancer.NextEndpoint(fooServiceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpoints1 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: fooServiceP.Name, Namespace: fooServiceP.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 123}},
			},
		},
	}
	endpoints2 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: barServiceP.Name, Namespace: barServiceP.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}},
				Ports:     []api.EndpointPort{{Name: "p", Port: 456}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints1)
	loadBalancer.OnEndpointsAdd(endpoints2)
	shuffledFooEndpoints := loadBalancer.services[fooServiceP].endpoints
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil)

	shuffledBarEndpoints := loadBalancer.services[barServiceP].endpoints
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)

	// Then update the configuration by removing foo
	loadBalancer.OnEndpointsDelete(endpoints1)
	endpoint, err = loadBalancer.NextEndpoint(fooServiceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	// but bar is still there, and we continue RR from where we left off.
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
}

func TestStickyLoadBalanceWorksWithNewServiceCalledFirst(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	// Call NewService() before OnEndpointsUpdate()
	loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
	endpoints := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{
			{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
			{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
			{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints)

	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}

	ep1, err := loadBalancer.NextEndpoint(service, client1, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep1, client1)

	ep2, err := loadBalancer.NextEndpoint(service, client2, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep2, client2)

	ep3, err := loadBalancer.NextEndpoint(service, client3, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep3, client3)

	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
}

func TestStickyLoadBalanceWorksWithNewServiceCalledSecond(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	// Call OnEndpointsUpdate() before NewService()
	endpoints := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{
			{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
			{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints)
	loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))

	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}

	ep1, err := loadBalancer.NextEndpoint(service, client1, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep1, client1)

	ep2, err := loadBalancer.NextEndpoint(service, client2, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep2, client2)

	ep3, err := loadBalancer.NextEndpoint(service, client3, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep3, client3)

	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
}

func TestStickyLoadBalanceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
	client4 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 4), Port: 0}
	client5 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 5), Port: 0}
	client6 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 6), Port: 0}
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
	endpointsv1 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint"}},
				Ports:     []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpointsv1)
	shuffledEndpoints := loadBalancer.services[service].endpoints
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	client1Endpoint := shuffledEndpoints[0]
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	client2Endpoint := shuffledEndpoints[1]
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
	client3Endpoint := shuffledEndpoints[2]

	endpointsv2 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint"}},
				Ports:     []api.EndpointPort{{Port: 1}, {Port: 2}},
			},
		},
	}
	loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)
	shuffledEndpoints = loadBalancer.services[service].endpoints
	if client1Endpoint == "endpoint:3" {
		client1Endpoint = shuffledEndpoints[0]
	} else if client2Endpoint == "endpoint:3" {
		client2Endpoint = shuffledEndpoints[0]
	} else if client3Endpoint == "endpoint:3" {
		client3Endpoint = shuffledEndpoints[0]
	}
	expectEndpoint(t, loadBalancer, service, client1Endpoint, client1)
	expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
	expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)

	endpointsv3 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint"}},
				Ports:     []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}},
			},
		},
	}
	loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
	shuffledEndpoints = loadBalancer.services[service].endpoints
	expectEndpoint(t, loadBalancer, service, client1Endpoint, client1)
	expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
	expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client4)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client5)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client6)
}

func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
	endpointsv1 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint"}},
				Ports:     []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpointsv1)
	shuffledEndpoints := loadBalancer.services[service].endpoints
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	// Then update the configuration with one fewer endpoints, and make sure
	// we start at the beginning again
	endpointsv2 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint"}},
				Ports:     []api.EndpointPort{{Port: 4}, {Port: 5}},
			},
		},
	}
	loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)
	shuffledEndpoints = loadBalancer.services[service].endpoints
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)

	// Clear endpoints
	endpointsv3 := &api.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil}
	loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)

	endpoint, err = loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
}

func TestStickyLoadBalanceWorksWithServiceRemoval(t *testing.T) {
	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
	loadBalancer := NewLoadBalancerRR()
	fooService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(fooService, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	loadBalancer.NewService(fooService, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
	endpoints1 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: fooService.Name, Namespace: fooService.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint"}},
				Ports:     []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
			},
		},
	}
	barService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: ""}
	loadBalancer.NewService(barService, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
	endpoints2 := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: barService.Name, Namespace: barService.Namespace},
		Subsets: []api.EndpointSubset{
			{
				Addresses: []api.EndpointAddress{{IP: "endpoint"}},
				Ports:     []api.EndpointPort{{Port: 4}, {Port: 5}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints1)
	loadBalancer.OnEndpointsAdd(endpoints2)

	shuffledFooEndpoints := loadBalancer.services[fooService].endpoints
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)

	shuffledBarEndpoints := loadBalancer.services[barService].endpoints
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)

	// Then update the configuration by removing foo
	loadBalancer.OnEndpointsDelete(endpoints1)
	endpoint, err = loadBalancer.NextEndpoint(fooService, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	// but bar is still there, and we continue RR from where we left off.
	shuffledBarEndpoints = loadBalancer.services[barService].endpoints
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
}

func TestStickyLoadBalanceWorksWithEndpointFails(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	// Call NewService() before OnEndpointsUpdate()
	loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
	endpoints := &api.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []api.EndpointSubset{
			{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
			{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
			{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints)

	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}

	ep1, err := loadBalancer.NextEndpoint(service, client1, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}

	ep2, err := loadBalancer.NextEndpoint(service, client2, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}

	ep3, err := loadBalancer.NextEndpoint(service, client3, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}

	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client1)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client1)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)

	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
}
35
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/types.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

// ServicePortPortalName carries a namespace + name + portname + portalip. This is the unique
// identifier for a Windows service port portal.
type ServicePortPortalName struct {
	types.NamespacedName
	Port         string
	PortalIPName string
}

func (spn ServicePortPortalName) String() string {
	return fmt.Sprintf("%s:%s:%s", spn.NamespacedName.String(), spn.Port, spn.PortalIPName)
}
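
For a concrete sense of the format, a made-up portal (namespace "default", service "dns", port name "dns", portal IP "10.96.0.10") renders as shown in this sketch; all values are illustrative:

	name := ServicePortPortalName{
		NamespacedName: types.NamespacedName{Namespace: "default", Name: "dns"},
		Port:           "dns",
		PortalIPName:   "10.96.0.10",
	}
	fmt.Println(name) // default/dns:dns:10.96.0.10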
47
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/udp_server.go
generated
vendored
Normal file
@ -0,0 +1,47 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
	"fmt"
	"net"
)

// udpEchoServer is a simple echo server in UDP, intended for testing the proxy.
type udpEchoServer struct {
	net.PacketConn
}

func (r *udpEchoServer) Loop() {
	var buffer [4096]byte
	for {
		n, cliAddr, err := r.ReadFrom(buffer[0:])
		if err != nil {
			fmt.Printf("ReadFrom failed: %v\n", err)
			continue
		}
		r.WriteTo(buffer[0:n], cliAddr)
	}
}

func newUDPEchoServer() (*udpEchoServer, error) {
	packetconn, err := net.ListenPacket("udp", ":0")
	if err != nil {
		return nil, err
	}
	return &udpEchoServer{packetconn}, nil
}
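
A minimal sketch of how a test might exercise this echo server end to end; the test name, payload, and error handling are illustrative additions, not from the original file:

	func TestEchoRoundTrip(t *testing.T) {
		srv, err := newUDPEchoServer()
		if err != nil {
			t.Fatalf("failed to start echo server: %v", err)
		}
		go srv.Loop()

		conn, err := net.Dial("udp", srv.LocalAddr().String())
		if err != nil {
			t.Fatalf("dial failed: %v", err)
		}
		defer conn.Close()
		if _, err := conn.Write([]byte("ping")); err != nil {
			t.Fatalf("write failed: %v", err)
		}
		reply := make([]byte, 4)
		if _, err := conn.Read(reply); err != nil {
			t.Fatalf("read failed: %v", err)
		}
		// Expect the same four bytes echoed back.
	}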