Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions


@@ -16,14 +16,14 @@ go_library(
],
importpath = "k8s.io/kubernetes/pkg/proxy",
deps = [
"//pkg/api/service:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/api/v1/service:go_default_library",
"//pkg/proxy/util:go_default_library",
"//pkg/util/net:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@@ -38,7 +38,7 @@ filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/proxy/apis/kubeproxyconfig:all-srcs",
"//pkg/proxy/apis/config:all-srcs",
"//pkg/proxy/config:all-srcs",
"//pkg/proxy/healthcheck:all-srcs",
"//pkg/proxy/iptables:all-srcs",
@@ -60,11 +60,11 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)


@@ -12,3 +12,5 @@ reviewers:
- dcbw
- m1093782566
- danwinship
labels:
- sig/network

vendor/k8s.io/kubernetes/pkg/proxy/apis/config/BUILD generated vendored Normal file

@@ -0,0 +1,42 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"register.go",
"types.go",
"zz_generated.deepcopy.go",
],
importpath = "k8s.io/kubernetes/pkg/proxy/apis/config",
deps = [
"//staging/src/k8s.io/apimachinery/pkg/apis/config:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/proxy/apis/config/fuzzer:all-srcs",
"//pkg/proxy/apis/config/scheme:all-srcs",
"//pkg/proxy/apis/config/v1alpha1:all-srcs",
"//pkg/proxy/apis/config/validation:all-srcs",
],
tags = ["automanaged"],
)


@@ -15,5 +15,6 @@ limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +groupName=kubeproxy.config.k8s.io
package kubeproxyconfig // import "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
package config // import "k8s.io/kubernetes/pkg/proxy/apis/config"


@@ -3,14 +3,14 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["fuzzer.go"],
importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/fuzzer",
importpath = "k8s.io/kubernetes/pkg/proxy/apis/config/fuzzer",
visibility = ["//visibility:public"],
deps = [
"//pkg/proxy/apis/kubeproxyconfig:go_default_library",
"//pkg/util/pointer:go_default_library",
"//pkg/proxy/apis/config:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/github.com/google/gofuzz:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)


@@ -24,8 +24,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config"
utilpointer "k8s.io/utils/pointer"
)
// Funcs returns the fuzzer functions for the kube-proxy apis.
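
The hunk ends at the comment for Funcs; a sketch of its conventional shape (an assumption, since the body sits outside this diff) shows how it plugs into the round-trip test further down:

func Funcs(codecs runtimeserializer.CodecFactory) []interface{} {
	return []interface{}{
		// Pin fields that defaulting would otherwise rewrite, so that
		// encode/decode round-trips compare equal.
		func(obj *kubeproxyconfig.KubeProxyConfiguration, c fuzz.Continue) {
			c.FuzzNoCustom(obj)
			obj.BindAddress = "127.0.0.1"
		},
	}
}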


@@ -14,36 +14,28 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package kubeproxyconfig
package config
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
// GroupName is the group name use in this package
// GroupName is the group name used in this package
const GroupName = "kubeproxy.config.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes registers known types to the given scheme
func addKnownTypes(scheme *runtime.Scheme) error {
// TODO this will get cleaned up when the scheme types are fixed
scheme.AddKnownTypes(SchemeGroupVersion,
&KubeProxyConfiguration{},
)
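
Taken together, the renamed package still follows the standard scheme-registration pattern; a minimal usage sketch (hypothetical caller, identifiers taken from the diff):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	proxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme runs addKnownTypes, registering KubeProxyConfiguration
	// under the internal version of kubeproxy.config.k8s.io.
	if err := proxyconfig.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// Kind and Resource qualify unversioned names with the group.
	fmt.Println(proxyconfig.Kind("KubeProxyConfiguration"))
	fmt.Println(proxyconfig.Resource("kubeproxyconfigurations"))
}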


@@ -3,13 +3,14 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["scheme.go"],
importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme",
importpath = "k8s.io/kubernetes/pkg/proxy/apis/config/scheme",
visibility = ["//visibility:public"],
deps = [
"//pkg/proxy/apis/kubeproxyconfig:go_default_library",
"//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//pkg/proxy/apis/config:go_default_library",
"//pkg/proxy/apis/config/v1alpha1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
],
)
@@ -32,7 +33,7 @@ go_test(
srcs = ["scheme_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/proxy/apis/kubeproxyconfig/fuzzer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library",
"//pkg/proxy/apis/config/fuzzer:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/apitesting/roundtrip:go_default_library",
],
)


@@ -19,8 +19,9 @@ package scheme
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/proxy/apis/config"
"k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1"
)
var (
@@ -37,6 +38,6 @@ func init() {
// AddToScheme adds the types of this group into the given scheme.
func AddToScheme(scheme *runtime.Scheme) {
v1alpha1.AddToScheme(scheme)
kubeproxyconfig.AddToScheme(scheme)
utilruntime.Must(v1alpha1.AddToScheme(scheme))
utilruntime.Must(config.AddToScheme(scheme))
}
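
Besides the package rename, the functional change here is that registration errors are no longer discarded: utilruntime.Must panics on a non-nil error, surfacing a bad registration at startup. A minimal sketch of the pattern, with a stand-in registration function:

package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

// addToScheme stands in for v1alpha1.AddToScheme / config.AddToScheme.
func addToScheme(s *runtime.Scheme) error { return nil }

func main() {
	s := runtime.NewScheme()
	// Previously the error return was ignored; Must turns it into a panic.
	utilruntime.Must(addToScheme(s))
}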


@@ -19,8 +19,8 @@ package scheme
import (
"testing"
"k8s.io/apimachinery/pkg/api/testing/roundtrip"
"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/fuzzer"
"k8s.io/apimachinery/pkg/api/apitesting/roundtrip"
"k8s.io/kubernetes/pkg/proxy/apis/config/fuzzer"
)
func TestRoundTripTypes(t *testing.T) {
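
The hunk cuts off at the function header; assuming the usual apitesting pattern and the package's exported Scheme variable, the body is a one-liner wiring the scheme to the fuzzer funcs imported above:

func TestRoundTripTypes(t *testing.T) {
	// Fuzz objects and verify they survive encode/decode through the scheme.
	roundtrip.RoundTripTestForScheme(t, Scheme, fuzzer.Funcs)
}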


@@ -14,32 +14,17 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package kubeproxyconfig
package config
import (
"fmt"
"sort"
"strings"
apimachineryconfig "k8s.io/apimachinery/pkg/apis/config"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ClientConnectionConfiguration contains details for constructing a client.
type ClientConnectionConfiguration struct {
// kubeconfig is the path to a kubeconfig file.
KubeConfigFile string
// acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the
// default value of 'application/json'. This field will control all connections to the server used by a particular
// client.
AcceptContentTypes string
// contentType is the content type used when sending data to the server from this client.
ContentType string
// qps controls the number of queries per second allowed for this connection.
QPS float32
// burst allows extra queries to accumulate when a client is exceeding its rate.
Burst int32
}
// KubeProxyIPTablesConfiguration contains iptables-related configuration
// details for the Kubernetes proxy server.
type KubeProxyIPTablesConfiguration struct {
@@ -123,7 +108,7 @@ type KubeProxyConfiguration struct {
HostnameOverride string
// clientConnection specifies the kubeconfig file and client connection settings for the proxy
// server to use when communicating with the apiserver.
ClientConnection ClientConnectionConfiguration
ClientConnection apimachineryconfig.ClientConnectionConfiguration
// iptables contains iptables-related configuration options.
IPTables KubeProxyIPTablesConfiguration
// ipvs contains ipvs-related configuration options.
@@ -187,7 +172,7 @@ type IPVSSchedulerMethod string
const (
// RoundRobin distributes jobs equally amongst the available real servers.
RoundRobin IPVSSchedulerMethod = "rr"
// WeightedRoundRobin assigns jobs to real servers proportionally to there real servers' weight.
// WeightedRoundRobin assigns jobs to real servers proportionally to their real servers' weight.
// Servers with higher weights receive new jobs first and get more jobs than servers with lower weights.
// Servers with equal weights get an equal distribution of new jobs.
WeightedRoundRobin IPVSSchedulerMethod = "wrr"
@@ -214,7 +199,7 @@ const (
DestinationHashing IPVSSchedulerMethod = "dh"
// ShortestExpectedDelay assigns an incoming job to the server with the shortest expected delay.
// The expected delay that the job will experience is (Ci + 1) / Ui if sent to the ith server, in which
// Ci is the number of jobs on the the ith server and Ui is the fixed service rate (weight) of the ith server.
// Ci is the number of jobs on the ith server and Ui is the fixed service rate (weight) of the ith server.
ShortestExpectedDelay IPVSSchedulerMethod = "sed"
// NeverQueue assigns an incoming job to an idle server if there is, instead of waiting for a fast one;
// if all the servers are busy, it adopts the ShortestExpectedDelay policy to assign the job.
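
The first hunk of this file deletes the package-local ClientConnectionConfiguration outright; the internal KubeProxyConfiguration now reuses the shared apimachinery type. A sketch of the resulting shape (remaining fields elided):

import (
	apimachineryconfig "k8s.io/apimachinery/pkg/apis/config"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type KubeProxyConfiguration struct {
	metav1.TypeMeta

	// clientConnection now reuses the shared apimachinery struct instead of
	// a local copy of the same kubeconfig/QPS/burst fields.
	ClientConnection apimachineryconfig.ClientConnectionConfiguration

	// ... iptables, ipvs, conntrack and the other fields are unchanged ...
}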


@@ -11,21 +11,22 @@ go_library(
"defaults.go",
"doc.go",
"register.go",
"types.go",
"zz_generated.conversion.go",
"zz_generated.deepcopy.go",
"zz_generated.defaults.go",
],
importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1",
importpath = "k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1",
deps = [
"//pkg/kubelet/qos:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/proxy/apis/kubeproxyconfig:go_default_library",
"//pkg/util/pointer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//pkg/proxy/apis/config:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/config/v1alpha1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/kube-proxy/config/v1alpha1:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)


@@ -23,16 +23,17 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kruntime "k8s.io/apimachinery/pkg/runtime"
kubeproxyconfigv1alpha1 "k8s.io/kube-proxy/config/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/util/pointer"
"k8s.io/utils/pointer"
)
func addDefaultingFuncs(scheme *kruntime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_KubeProxyConfiguration(obj *KubeProxyConfiguration) {
func SetDefaults_KubeProxyConfiguration(obj *kubeproxyconfigv1alpha1.KubeProxyConfiguration) {
if len(obj.BindAddress) == 0 {
obj.BindAddress = "0.0.0.0"
}
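
SetDefaults_KubeProxyConfiguration now takes the external type from k8s.io/kube-proxy/config/v1alpha1. Defaulters are registered via RegisterDefaults and normally invoked through the scheme rather than called directly; a hedged usage sketch (assuming the scheme package exports Scheme, per scheme.go above):

import (
	kubeproxyconfigv1alpha1 "k8s.io/kube-proxy/config/v1alpha1"
	proxyscheme "k8s.io/kubernetes/pkg/proxy/apis/config/scheme"
)

func defaultedConfig() *kubeproxyconfigv1alpha1.KubeProxyConfiguration {
	obj := &kubeproxyconfigv1alpha1.KubeProxyConfiguration{}
	// Default dispatches to SetObjectDefaults_KubeProxyConfiguration, which
	// fills BindAddress ("0.0.0.0") and the other unset fields.
	proxyscheme.Scheme.Default(obj)
	return obj
}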


@@ -15,8 +15,10 @@ limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig
// +k8s:openapi-gen=true
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/proxy/apis/config
// +k8s:conversion-gen-external-types=k8s.io/kube-proxy/config/v1alpha1
// +k8s:defaulter-gen=TypeMeta
// +k8s:defaulter-gen-input=../../../../../vendor/k8s.io/kube-proxy/config/v1alpha1
// +groupName=kubeproxy.config.k8s.io
package v1alpha1 // import "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1"
package v1alpha1 // import "k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1"


@@ -17,34 +17,27 @@ limitations under the License.
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
kubeproxyconfigv1alpha1 "k8s.io/kube-proxy/config/v1alpha1"
)
// GroupName is the group name use in this package
// GroupName is the group name used in this package
const GroupName = "kubeproxy.config.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
var (
// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
// localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package,
// defaulting and conversion init funcs are registered as well.
localSchemeBuilder = &kubeproxyconfigv1alpha1.SchemeBuilder
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&KubeProxyConfiguration{},
)
return nil
localSchemeBuilder.Register(addDefaultingFuncs)
}
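
Type registration now lives with the external types in k8s.io/kube-proxy/config/v1alpha1; this package only appends defaulting (and, via the init in zz_generated.conversion.go below, the generated conversions). A sketch of what a caller gets:

package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	proxyv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1"
)

func main() {
	s := runtime.NewScheme()
	// AddToScheme now runs the external package's type registration plus
	// the locally registered defaulting and conversion functions.
	utilruntime.Must(proxyv1alpha1.AddToScheme(s))
}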


@@ -0,0 +1,232 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
unsafe "unsafe"
configv1alpha1 "k8s.io/apimachinery/pkg/apis/config/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
v1alpha1 "k8s.io/kube-proxy/config/v1alpha1"
config "k8s.io/kubernetes/pkg/proxy/apis/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*v1alpha1.KubeProxyConfiguration)(nil), (*config.KubeProxyConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguration(a.(*v1alpha1.KubeProxyConfiguration), b.(*config.KubeProxyConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KubeProxyConfiguration)(nil), (*v1alpha1.KubeProxyConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(a.(*config.KubeProxyConfiguration), b.(*v1alpha1.KubeProxyConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.KubeProxyConntrackConfiguration)(nil), (*config.KubeProxyConntrackConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration(a.(*v1alpha1.KubeProxyConntrackConfiguration), b.(*config.KubeProxyConntrackConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KubeProxyConntrackConfiguration)(nil), (*v1alpha1.KubeProxyConntrackConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(a.(*config.KubeProxyConntrackConfiguration), b.(*v1alpha1.KubeProxyConntrackConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.KubeProxyIPTablesConfiguration)(nil), (*config.KubeProxyIPTablesConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration(a.(*v1alpha1.KubeProxyIPTablesConfiguration), b.(*config.KubeProxyIPTablesConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KubeProxyIPTablesConfiguration)(nil), (*v1alpha1.KubeProxyIPTablesConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(a.(*config.KubeProxyIPTablesConfiguration), b.(*v1alpha1.KubeProxyIPTablesConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.KubeProxyIPVSConfiguration)(nil), (*config.KubeProxyIPVSConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration(a.(*v1alpha1.KubeProxyIPVSConfiguration), b.(*config.KubeProxyIPVSConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KubeProxyIPVSConfiguration)(nil), (*v1alpha1.KubeProxyIPVSConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(a.(*config.KubeProxyIPVSConfiguration), b.(*v1alpha1.KubeProxyIPVSConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguration(in *v1alpha1.KubeProxyConfiguration, out *config.KubeProxyConfiguration, s conversion.Scope) error {
out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates))
out.BindAddress = in.BindAddress
out.HealthzBindAddress = in.HealthzBindAddress
out.MetricsBindAddress = in.MetricsBindAddress
out.EnableProfiling = in.EnableProfiling
out.ClusterCIDR = in.ClusterCIDR
out.HostnameOverride = in.HostnameOverride
if err := configv1alpha1.Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil {
return err
}
if err := Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration(&in.IPTables, &out.IPTables, s); err != nil {
return err
}
if err := Convert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration(&in.IPVS, &out.IPVS, s); err != nil {
return err
}
out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj))
out.Mode = config.ProxyMode(in.Mode)
out.PortRange = in.PortRange
out.ResourceContainer = in.ResourceContainer
out.UDPIdleTimeout = in.UDPIdleTimeout
if err := Convert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil {
return err
}
out.ConfigSyncPeriod = in.ConfigSyncPeriod
out.NodePortAddresses = *(*[]string)(unsafe.Pointer(&in.NodePortAddresses))
return nil
}
// Convert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguration(in *v1alpha1.KubeProxyConfiguration, out *config.KubeProxyConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguration(in, out, s)
}
func autoConvert_config_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *config.KubeProxyConfiguration, out *v1alpha1.KubeProxyConfiguration, s conversion.Scope) error {
out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates))
out.BindAddress = in.BindAddress
out.HealthzBindAddress = in.HealthzBindAddress
out.MetricsBindAddress = in.MetricsBindAddress
out.EnableProfiling = in.EnableProfiling
out.ClusterCIDR = in.ClusterCIDR
out.HostnameOverride = in.HostnameOverride
if err := configv1alpha1.Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil {
return err
}
if err := Convert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(&in.IPTables, &out.IPTables, s); err != nil {
return err
}
if err := Convert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(&in.IPVS, &out.IPVS, s); err != nil {
return err
}
out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj))
out.Mode = v1alpha1.ProxyMode(in.Mode)
out.PortRange = in.PortRange
out.ResourceContainer = in.ResourceContainer
out.UDPIdleTimeout = in.UDPIdleTimeout
if err := Convert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil {
return err
}
out.ConfigSyncPeriod = in.ConfigSyncPeriod
out.NodePortAddresses = *(*[]string)(unsafe.Pointer(&in.NodePortAddresses))
return nil
}
// Convert_config_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration is an autogenerated conversion function.
func Convert_config_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *config.KubeProxyConfiguration, out *v1alpha1.KubeProxyConfiguration, s conversion.Scope) error {
return autoConvert_config_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in, out, s)
}
func autoConvert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration(in *v1alpha1.KubeProxyConntrackConfiguration, out *config.KubeProxyConntrackConfiguration, s conversion.Scope) error {
out.Max = (*int32)(unsafe.Pointer(in.Max))
out.MaxPerCore = (*int32)(unsafe.Pointer(in.MaxPerCore))
out.Min = (*int32)(unsafe.Pointer(in.Min))
out.TCPEstablishedTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPEstablishedTimeout))
out.TCPCloseWaitTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPCloseWaitTimeout))
return nil
}
// Convert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration(in *v1alpha1.KubeProxyConntrackConfiguration, out *config.KubeProxyConntrackConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration(in, out, s)
}
func autoConvert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in *config.KubeProxyConntrackConfiguration, out *v1alpha1.KubeProxyConntrackConfiguration, s conversion.Scope) error {
out.Max = (*int32)(unsafe.Pointer(in.Max))
out.MaxPerCore = (*int32)(unsafe.Pointer(in.MaxPerCore))
out.Min = (*int32)(unsafe.Pointer(in.Min))
out.TCPEstablishedTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPEstablishedTimeout))
out.TCPCloseWaitTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPCloseWaitTimeout))
return nil
}
// Convert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration is an autogenerated conversion function.
func Convert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in *config.KubeProxyConntrackConfiguration, out *v1alpha1.KubeProxyConntrackConfiguration, s conversion.Scope) error {
return autoConvert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in, out, s)
}
func autoConvert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration(in *v1alpha1.KubeProxyIPTablesConfiguration, out *config.KubeProxyIPTablesConfiguration, s conversion.Scope) error {
out.MasqueradeBit = (*int32)(unsafe.Pointer(in.MasqueradeBit))
out.MasqueradeAll = in.MasqueradeAll
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
return nil
}
// Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration(in *v1alpha1.KubeProxyIPTablesConfiguration, out *config.KubeProxyIPTablesConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration(in, out, s)
}
func autoConvert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in *config.KubeProxyIPTablesConfiguration, out *v1alpha1.KubeProxyIPTablesConfiguration, s conversion.Scope) error {
out.MasqueradeBit = (*int32)(unsafe.Pointer(in.MasqueradeBit))
out.MasqueradeAll = in.MasqueradeAll
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
return nil
}
// Convert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration is an autogenerated conversion function.
func Convert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in *config.KubeProxyIPTablesConfiguration, out *v1alpha1.KubeProxyIPTablesConfiguration, s conversion.Scope) error {
return autoConvert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in, out, s)
}
func autoConvert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration(in *v1alpha1.KubeProxyIPVSConfiguration, out *config.KubeProxyIPVSConfiguration, s conversion.Scope) error {
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
out.Scheduler = in.Scheduler
out.ExcludeCIDRs = *(*[]string)(unsafe.Pointer(&in.ExcludeCIDRs))
return nil
}
// Convert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration(in *v1alpha1.KubeProxyIPVSConfiguration, out *config.KubeProxyIPVSConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration(in, out, s)
}
func autoConvert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in *config.KubeProxyIPVSConfiguration, out *v1alpha1.KubeProxyIPVSConfiguration, s conversion.Scope) error {
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
out.Scheduler = in.Scheduler
out.ExcludeCIDRs = *(*[]string)(unsafe.Pointer(&in.ExcludeCIDRs))
return nil
}
// Convert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration is an autogenerated conversion function.
func Convert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in *config.KubeProxyIPVSConfiguration, out *v1alpha1.KubeProxyIPVSConfiguration, s conversion.Scope) error {
return autoConvert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in, out, s)
}
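
Once RegisterConversions has run, callers convert through the scheme rather than calling the generated functions directly; a hedged sketch (again assuming the scheme package exports Scheme):

import (
	v1alpha1 "k8s.io/kube-proxy/config/v1alpha1"
	"k8s.io/kubernetes/pkg/proxy/apis/config"
	proxyscheme "k8s.io/kubernetes/pkg/proxy/apis/config/scheme"
)

func toInternal(in *v1alpha1.KubeProxyConfiguration) (*config.KubeProxyConfiguration, error) {
	out := &config.KubeProxyConfiguration{}
	// Convert looks up the registered generated function
	// Convert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguration.
	if err := proxyscheme.Scheme.Convert(in, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}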


@@ -0,0 +1,21 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1


@@ -22,16 +22,19 @@ package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
v1alpha1 "k8s.io/kube-proxy/config/v1alpha1"
)
// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&KubeProxyConfiguration{}, func(obj interface{}) { SetObjectDefaults_KubeProxyConfiguration(obj.(*KubeProxyConfiguration)) })
scheme.AddTypeDefaultingFunc(&v1alpha1.KubeProxyConfiguration{}, func(obj interface{}) {
SetObjectDefaults_KubeProxyConfiguration(obj.(*v1alpha1.KubeProxyConfiguration))
})
return nil
}
func SetObjectDefaults_KubeProxyConfiguration(in *KubeProxyConfiguration) {
func SetObjectDefaults_KubeProxyConfiguration(in *v1alpha1.KubeProxyConfiguration) {
SetDefaults_KubeProxyConfiguration(in)
}


@@ -0,0 +1,47 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["validation.go"],
importpath = "k8s.io/kubernetes/pkg/proxy/apis/config/validation",
deps = [
"//pkg/apis/core/validation:go_default_library",
"//pkg/proxy/apis/config:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/config:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["validation_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/proxy/apis/config:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/config:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)


@@ -23,11 +23,12 @@ import (
"strconv"
"strings"
apimachineryconfig "k8s.io/apimachinery/pkg/apis/config"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config"
)
// Validate validates the configuration of kube-proxy
@@ -60,7 +61,9 @@ func Validate(config *kubeproxyconfig.KubeProxyConfiguration) field.ErrorList {
allErrs = append(allErrs, field.Invalid(newPath.Child("BindAddress"), config.BindAddress, "not a valid textual representation of an IP address"))
}
allErrs = append(allErrs, validateHostPort(config.HealthzBindAddress, newPath.Child("HealthzBindAddress"))...)
if config.HealthzBindAddress != "" {
allErrs = append(allErrs, validateHostPort(config.HealthzBindAddress, newPath.Child("HealthzBindAddress"))...)
}
allErrs = append(allErrs, validateHostPort(config.MetricsBindAddress, newPath.Child("MetricsBindAddress"))...)
if config.ClusterCIDR != "" {
@@ -184,7 +187,7 @@ func validateProxyModeWindows(mode kubeproxyconfig.ProxyMode, fldPath *field.Pat
return field.ErrorList{field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)}
}
func validateClientConnectionConfiguration(config kubeproxyconfig.ClientConnectionConfiguration, fldPath *field.Path) field.ErrorList {
func validateClientConnectionConfiguration(config apimachineryconfig.ClientConnectionConfiguration, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(config.Burst), fldPath.Child("Burst"))...)
return allErrs
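
The behavioral change in the first hunk: an empty healthzBindAddress, which disables the healthz endpoint, no longer fails host-port validation; the new success case added to the tests below exercises exactly that. A hedged sketch of the call site:

import (
	kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config"
	"k8s.io/kubernetes/pkg/proxy/apis/config/validation"
)

func check(cfg *kubeproxyconfig.KubeProxyConfiguration) error {
	cfg.HealthzBindAddress = "" // now valid: healthz server disabled
	if errs := validation.Validate(cfg); len(errs) > 0 {
		return errs.ToAggregate()
	}
	return nil
}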


@@ -23,10 +23,11 @@ import (
"testing"
"time"
apimachineryconfig "k8s.io/apimachinery/pkg/apis/config"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
"k8s.io/kubernetes/pkg/util/pointer"
kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config"
"k8s.io/utils/pointer"
)
func TestValidateKubeProxyConfiguration(t *testing.T) {
@@ -55,9 +56,9 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second},
},
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
@@ -75,9 +76,29 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
},
{
BindAddress: "192.168.59.103",
HealthzBindAddress: "",
MetricsBindAddress: "127.0.0.1:10249",
ClusterCIDR: "192.168.59.0/24",
UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second},
ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second},
IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
MasqueradeAll: true,
SyncPeriod: metav1.Duration{Duration: 5 * time.Second},
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
@@ -109,9 +130,9 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
@@ -133,9 +154,9 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
@@ -157,9 +178,9 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
@@ -181,9 +202,9 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
@@ -205,9 +226,9 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
@@ -229,9 +250,9 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
},
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
@@ -254,9 +275,9 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
// not specifying valid period in IPVS mode.
Mode: kubeproxyconfig.ProxyModeIPVS,
Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
@@ -427,16 +448,16 @@ func TestValidateKubeProxyIPVSConfiguration(t *testing.T) {
func TestValidateKubeProxyConntrackConfiguration(t *testing.T) {
successCases := []kubeproxyconfig.KubeProxyConntrackConfiguration{
{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
{
Max: pointer.Int32Ptr(0),
MaxPerCore: pointer.Int32Ptr(0),
Min: pointer.Int32Ptr(0),
Max: pointer.Int32Ptr(0),
MaxPerCore: pointer.Int32Ptr(0),
Min: pointer.Int32Ptr(0),
TCPEstablishedTimeout: &metav1.Duration{Duration: 0 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 0 * time.Second},
},
@@ -454,9 +475,9 @@ func TestValidateKubeProxyConntrackConfiguration(t *testing.T) {
}{
{
config: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(-1),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
Max: pointer.Int32Ptr(-1),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
@@ -464,9 +485,9 @@ func TestValidateKubeProxyConntrackConfiguration(t *testing.T) {
},
{
config: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(-1),
Min: pointer.Int32Ptr(1),
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(-1),
Min: pointer.Int32Ptr(1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
@@ -474,9 +495,9 @@ func TestValidateKubeProxyConntrackConfiguration(t *testing.T) {
},
{
config: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(-1),
Max: pointer.Int32Ptr(2),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(-1),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
@@ -484,9 +505,9 @@ func TestValidateKubeProxyConntrackConfiguration(t *testing.T) {
},
{
config: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(4),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(3),
Max: pointer.Int32Ptr(4),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(3),
TCPEstablishedTimeout: &metav1.Duration{Duration: -5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second},
},
@@ -494,9 +515,9 @@ func TestValidateKubeProxyConntrackConfiguration(t *testing.T) {
},
{
config: kubeproxyconfig.KubeProxyConntrackConfiguration{
Max: pointer.Int32Ptr(4),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(3),
Max: pointer.Int32Ptr(4),
MaxPerCore: pointer.Int32Ptr(1),
Min: pointer.Int32Ptr(3),
TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
TCPCloseWaitTimeout: &metav1.Duration{Duration: -5 * time.Second},
},
@@ -554,7 +575,7 @@ func TestValidateProxyMode(t *testing.T) {
func TestValidateClientConnectionConfiguration(t *testing.T) {
newPath := field.NewPath("KubeProxyConfiguration")
successCases := []kubeproxyconfig.ClientConnectionConfiguration{
successCases := []apimachineryconfig.ClientConnectionConfiguration{
{
Burst: 0,
},
@@ -570,11 +591,11 @@ func TestValidateClientConnectionConfiguration(t *testing.T) {
}
errorCases := []struct {
ccc kubeproxyconfig.ClientConnectionConfiguration
ccc apimachineryconfig.ClientConnectionConfiguration
msg string
}{
{
ccc: kubeproxyconfig.ClientConnectionConfiguration{Burst: -5},
ccc: apimachineryconfig.ClientConnectionConfiguration{Burst: -5},
msg: "must be greater than or equal to 0",
},
}
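
Apart from the new empty-healthzBindAddress success case, the churn in this file is mechanical: the pointer helpers moved from pkg/util/pointer to k8s.io/utils/pointer with identical names, and gofmt realigned the surrounding struct literals. A minimal sketch of the helper at its new path:

import "k8s.io/utils/pointer"

var max = pointer.Int32Ptr(2) // *int32 with value 2, same helper as before the move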


@@ -18,7 +18,7 @@ limitations under the License.
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
package config
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -26,19 +26,25 @@ import (
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClientConnectionConfiguration) DeepCopyInto(out *ClientConnectionConfiguration) {
*out = *in
return
func (in ConfigurationMap) DeepCopyInto(out *ConfigurationMap) {
{
in := &in
*out = make(ConfigurationMap, len(*in))
for key, val := range *in {
(*out)[key] = val
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionConfiguration.
func (in *ClientConnectionConfiguration) DeepCopy() *ClientConnectionConfiguration {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationMap.
func (in ConfigurationMap) DeepCopy() ConfigurationMap {
if in == nil {
return nil
}
out := new(ClientConnectionConfiguration)
out := new(ConfigurationMap)
in.DeepCopyInto(out)
return out
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -57,12 +63,8 @@ func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) {
in.IPVS.DeepCopyInto(&out.IPVS)
if in.OOMScoreAdj != nil {
in, out := &in.OOMScoreAdj, &out.OOMScoreAdj
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
*out = new(int32)
**out = **in
}
out.UDPIdleTimeout = in.UDPIdleTimeout
in.Conntrack.DeepCopyInto(&out.Conntrack)
@@ -98,48 +100,28 @@ func (in *KubeProxyConntrackConfiguration) DeepCopyInto(out *KubeProxyConntrackC
*out = *in
if in.Max != nil {
in, out := &in.Max, &out.Max
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
*out = new(int32)
**out = **in
}
if in.MaxPerCore != nil {
in, out := &in.MaxPerCore, &out.MaxPerCore
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
*out = new(int32)
**out = **in
}
if in.Min != nil {
in, out := &in.Min, &out.Min
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
*out = new(int32)
**out = **in
}
if in.TCPEstablishedTimeout != nil {
in, out := &in.TCPEstablishedTimeout, &out.TCPEstablishedTimeout
if *in == nil {
*out = nil
} else {
*out = new(v1.Duration)
**out = **in
}
*out = new(v1.Duration)
**out = **in
}
if in.TCPCloseWaitTimeout != nil {
in, out := &in.TCPCloseWaitTimeout, &out.TCPCloseWaitTimeout
if *in == nil {
*out = nil
} else {
*out = new(v1.Duration)
**out = **in
}
*out = new(v1.Duration)
**out = **in
}
return
}
@@ -159,12 +141,8 @@ func (in *KubeProxyIPTablesConfiguration) DeepCopyInto(out *KubeProxyIPTablesCon
*out = *in
if in.MasqueradeBit != nil {
in, out := &in.MasqueradeBit, &out.MasqueradeBit
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
*out = new(int32)
**out = **in
}
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
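
The deleted branches were dead code: each inner "*in == nil" check sits inside a block already guarded by a non-nil test on the same field, and the shadowed "in" aliases that field, so the inner branch can never fire. The pattern, distilled onto a hypothetical one-field struct:

type example struct{ Field *int32 }

func (in *example) DeepCopyInto(out *example) {
	*out = *in
	if in.Field != nil {
		in, out := &in.Field, &out.Field
		// *in aliases in.Field, already known non-nil; no inner check needed.
		*out = new(int32)
		**out = **in
	}
}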


@@ -1,41 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"register.go",
"types.go",
"zz_generated.deepcopy.go",
],
importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig",
deps = [
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/proxy/apis/kubeproxyconfig/fuzzer:all-srcs",
"//pkg/proxy/apis/kubeproxyconfig/scheme:all-srcs",
"//pkg/proxy/apis/kubeproxyconfig/v1alpha1:all-srcs",
"//pkg/proxy/apis/kubeproxyconfig/validation:all-srcs",
],
tags = ["automanaged"],
)


@@ -1,169 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ClientConnectionConfiguration contains details for constructing a client.
type ClientConnectionConfiguration struct {
// kubeconfig is the path to a kubeconfig file.
KubeConfigFile string `json:"kubeconfig"`
// acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the
// default value of 'application/json'. This field will control all connections to the server used by a particular
// client.
AcceptContentTypes string `json:"acceptContentTypes"`
// contentType is the content type used when sending data to the server from this client.
ContentType string `json:"contentType"`
// qps controls the number of queries per second allowed for this connection.
QPS float32 `json:"qps"`
// burst allows extra queries to accumulate when a client is exceeding its rate.
Burst int `json:"burst"`
}
// KubeProxyIPTablesConfiguration contains iptables-related configuration
// details for the Kubernetes proxy server.
type KubeProxyIPTablesConfiguration struct {
// masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using
// the pure iptables proxy mode. Values must be within the range [0, 31].
MasqueradeBit *int32 `json:"masqueradeBit"`
// masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.
MasqueradeAll bool `json:"masqueradeAll"`
// syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m',
// '2h22m'). Must be greater than 0.
SyncPeriod metav1.Duration `json:"syncPeriod"`
// minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m',
// '2h22m').
MinSyncPeriod metav1.Duration `json:"minSyncPeriod"`
}
// KubeProxyIPVSConfiguration contains ipvs-related configuration
// details for the Kubernetes proxy server.
type KubeProxyIPVSConfiguration struct {
// syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m',
// '2h22m'). Must be greater than 0.
SyncPeriod metav1.Duration `json:"syncPeriod"`
// minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m',
// '2h22m').
MinSyncPeriod metav1.Duration `json:"minSyncPeriod"`
// ipvs scheduler
Scheduler string `json:"scheduler"`
// excludeCIDRs is a list of CIDR's which the ipvs proxier should not touch
// when cleaning up ipvs services.
ExcludeCIDRs []string `json:"excludeCIDRs"`
}
// KubeProxyConntrackConfiguration contains conntrack settings for
// the Kubernetes proxy server.
type KubeProxyConntrackConfiguration struct {
// max is the maximum number of NAT connections to track (0 to
// leave as-is). This takes precedence over maxPerCore and min.
Max *int32 `json:"max"`
// maxPerCore is the maximum number of NAT connections to track
// per CPU core (0 to leave the limit as-is and ignore min).
MaxPerCore *int32 `json:"maxPerCore"`
// min is the minimum value of connect-tracking records to allocate,
// regardless of conntrackMaxPerCore (set maxPerCore=0 to leave the limit as-is).
Min *int32 `json:"min"`
// tcpEstablishedTimeout is how long an idle TCP connection will be kept open
// (e.g. '2s'). Must be greater than 0 to set.
TCPEstablishedTimeout *metav1.Duration `json:"tcpEstablishedTimeout"`
// tcpCloseWaitTimeout is how long an idle conntrack entry
// in CLOSE_WAIT state will remain in the conntrack
// table. (e.g. '60s'). Must be greater than 0 to set.
TCPCloseWaitTimeout *metav1.Duration `json:"tcpCloseWaitTimeout"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// KubeProxyConfiguration contains everything necessary to configure the
// Kubernetes proxy server.
type KubeProxyConfiguration struct {
metav1.TypeMeta `json:",inline"`
// featureGates is a map of feature names to bools that enable or disable alpha/experimental features.
FeatureGates map[string]bool `json:"featureGates,omitempty"`
// bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0
// for all interfaces)
BindAddress string `json:"bindAddress"`
// healthzBindAddress is the IP address and port for the health check server to serve on,
// defaulting to 0.0.0.0:10256
HealthzBindAddress string `json:"healthzBindAddress"`
// metricsBindAddress is the IP address and port for the metrics server to serve on,
// defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces)
MetricsBindAddress string `json:"metricsBindAddress"`
// enableProfiling enables profiling via web interface on /debug/pprof handler.
// Profiling handlers will be handled by metrics server.
EnableProfiling bool `json:"enableProfiling"`
// clusterCIDR is the CIDR range of the pods in the cluster. It is used to
// bridge traffic coming from outside of the cluster. If not provided,
// no off-cluster bridging will be performed.
ClusterCIDR string `json:"clusterCIDR"`
// hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.
HostnameOverride string `json:"hostnameOverride"`
// clientConnection specifies the kubeconfig file and client connection settings for the proxy
// server to use when communicating with the apiserver.
ClientConnection ClientConnectionConfiguration `json:"clientConnection"`
// iptables contains iptables-related configuration options.
IPTables KubeProxyIPTablesConfiguration `json:"iptables"`
// ipvs contains ipvs-related configuration options.
IPVS KubeProxyIPVSConfiguration `json:"ipvs"`
// oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within
// the range [-1000, 1000]
OOMScoreAdj *int32 `json:"oomScoreAdj"`
// mode specifies which proxy mode to use.
Mode ProxyMode `json:"mode"`
// portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
// in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
PortRange string `json:"portRange"`
// resourceContainer is the absolute name of the resource-only container to create and run
// the Kube-proxy in (Default: /kube-proxy).
ResourceContainer string `json:"resourceContainer"`
// udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
// Must be greater than 0. Only applicable for proxyMode=userspace.
UDPIdleTimeout metav1.Duration `json:"udpIdleTimeout"`
// conntrack contains conntrack-related configuration options.
Conntrack KubeProxyConntrackConfiguration `json:"conntrack"`
// configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater
// than 0.
ConfigSyncPeriod metav1.Duration `json:"configSyncPeriod"`
// nodePortAddresses is the --nodeport-addresses value for kube-proxy process. Values must be valid
// IP blocks. These values are as a parameter to select the interfaces where nodeport works.
// In case someone would like to expose a service on localhost for local visit and some other interfaces for
// particular purpose, a list of IP blocks would do that.
// If set it to "127.0.0.0/8", kube-proxy will only select the loopback interface for NodePort.
// If set it to a non-zero IP block, kube-proxy will filter that down to just the IPs that applied to the node.
// An empty string slice is meant to select all network interfaces.
NodePortAddresses []string `json:"nodePortAddresses"`
}
// Currently, three modes of proxy are available in Linux platform: 'userspace' (older, going to be EOL), 'iptables'
// (newer, faster), 'ipvs'(newest, better in performance and scalability).
//
// Two modes of proxy are available in Windows platform: 'userspace'(older, stable) and 'kernelspace' (newer, faster).
//
// In Linux platform, if proxy mode is blank, use the best-available proxy (currently iptables, but may change in the
// future). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are
// insufficient, this always falls back to the userspace proxy. IPVS mode will be enabled when proxy mode is set to 'ipvs',
// and the fall back path is firstly iptables and then userspace.
// In Windows platform, if proxy mode is blank, use the best-available proxy (currently userspace, but may change in the
// future). If winkernel proxy is selected, regardless of how, but the Windows kernel can't support this mode of proxy,
// this always falls back to the userspace proxy.
type ProxyMode string
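
For reference, the mode strings described above correspond to named constants elsewhere in the configuration types (outside this hunk); a sketch:

const (
	ProxyModeUserspace   ProxyMode = "userspace"
	ProxyModeIPTables    ProxyMode = "iptables"
	ProxyModeIPVS        ProxyMode = "ipvs"
	ProxyModeKernelspace ProxyMode = "kernelspace"
)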


@@ -1,229 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
unsafe "unsafe"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(scheme *runtime.Scheme) error {
return scheme.AddGeneratedConversionFuncs(
Convert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration,
Convert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration,
Convert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration,
Convert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration,
Convert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration,
Convert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration,
Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration,
Convert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration,
Convert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration,
Convert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration,
)
}
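
// A hedged usage sketch, not part of the generated file: building a standalone scheme
// with these conversions registered, mirroring the localSchemeBuilder wiring above.
func exampleNewConversionScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := RegisterConversions(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}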
func autoConvert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration(in *ClientConnectionConfiguration, out *kubeproxyconfig.ClientConnectionConfiguration, s conversion.Scope) error {
out.KubeConfigFile = in.KubeConfigFile
out.AcceptContentTypes = in.AcceptContentTypes
out.ContentType = in.ContentType
out.QPS = in.QPS
out.Burst = int32(in.Burst)
return nil
}
// Convert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration(in *ClientConnectionConfiguration, out *kubeproxyconfig.ClientConnectionConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration(in, out, s)
}
func autoConvert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(in *kubeproxyconfig.ClientConnectionConfiguration, out *ClientConnectionConfiguration, s conversion.Scope) error {
out.KubeConfigFile = in.KubeConfigFile
out.AcceptContentTypes = in.AcceptContentTypes
out.ContentType = in.ContentType
out.QPS = in.QPS
out.Burst = int(in.Burst)
return nil
}
// Convert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration is an autogenerated conversion function.
func Convert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(in *kubeproxyconfig.ClientConnectionConfiguration, out *ClientConnectionConfiguration, s conversion.Scope) error {
return autoConvert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(in, out, s)
}
func autoConvert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *kubeproxyconfig.KubeProxyConfiguration, s conversion.Scope) error {
out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates))
out.BindAddress = in.BindAddress
out.HealthzBindAddress = in.HealthzBindAddress
out.MetricsBindAddress = in.MetricsBindAddress
out.EnableProfiling = in.EnableProfiling
out.ClusterCIDR = in.ClusterCIDR
out.HostnameOverride = in.HostnameOverride
if err := Convert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil {
return err
}
if err := Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration(&in.IPTables, &out.IPTables, s); err != nil {
return err
}
if err := Convert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration(&in.IPVS, &out.IPVS, s); err != nil {
return err
}
out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj))
out.Mode = kubeproxyconfig.ProxyMode(in.Mode)
out.PortRange = in.PortRange
out.ResourceContainer = in.ResourceContainer
out.UDPIdleTimeout = in.UDPIdleTimeout
if err := Convert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil {
return err
}
out.ConfigSyncPeriod = in.ConfigSyncPeriod
out.NodePortAddresses = *(*[]string)(unsafe.Pointer(&in.NodePortAddresses))
return nil
}
// Convert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *kubeproxyconfig.KubeProxyConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration(in, out, s)
}
func autoConvert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *kubeproxyconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error {
out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates))
out.BindAddress = in.BindAddress
out.HealthzBindAddress = in.HealthzBindAddress
out.MetricsBindAddress = in.MetricsBindAddress
out.EnableProfiling = in.EnableProfiling
out.ClusterCIDR = in.ClusterCIDR
out.HostnameOverride = in.HostnameOverride
if err := Convert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil {
return err
}
if err := Convert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(&in.IPTables, &out.IPTables, s); err != nil {
return err
}
if err := Convert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(&in.IPVS, &out.IPVS, s); err != nil {
return err
}
out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj))
out.Mode = ProxyMode(in.Mode)
out.PortRange = in.PortRange
out.ResourceContainer = in.ResourceContainer
out.UDPIdleTimeout = in.UDPIdleTimeout
if err := Convert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil {
return err
}
out.ConfigSyncPeriod = in.ConfigSyncPeriod
out.NodePortAddresses = *(*[]string)(unsafe.Pointer(&in.NodePortAddresses))
return nil
}
// Convert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration is an autogenerated conversion function.
func Convert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *kubeproxyconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error {
return autoConvert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in, out, s)
}
func autoConvert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration(in *KubeProxyConntrackConfiguration, out *kubeproxyconfig.KubeProxyConntrackConfiguration, s conversion.Scope) error {
out.Max = (*int32)(unsafe.Pointer(in.Max))
out.MaxPerCore = (*int32)(unsafe.Pointer(in.MaxPerCore))
out.Min = (*int32)(unsafe.Pointer(in.Min))
out.TCPEstablishedTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPEstablishedTimeout))
out.TCPCloseWaitTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPCloseWaitTimeout))
return nil
}
// Convert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration(in *KubeProxyConntrackConfiguration, out *kubeproxyconfig.KubeProxyConntrackConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration(in, out, s)
}
func autoConvert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in *kubeproxyconfig.KubeProxyConntrackConfiguration, out *KubeProxyConntrackConfiguration, s conversion.Scope) error {
out.Max = (*int32)(unsafe.Pointer(in.Max))
out.MaxPerCore = (*int32)(unsafe.Pointer(in.MaxPerCore))
out.Min = (*int32)(unsafe.Pointer(in.Min))
out.TCPEstablishedTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPEstablishedTimeout))
out.TCPCloseWaitTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPCloseWaitTimeout))
return nil
}
// Convert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration is an autogenerated conversion function.
func Convert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in *kubeproxyconfig.KubeProxyConntrackConfiguration, out *KubeProxyConntrackConfiguration, s conversion.Scope) error {
return autoConvert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in, out, s)
}
func autoConvert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration(in *KubeProxyIPTablesConfiguration, out *kubeproxyconfig.KubeProxyIPTablesConfiguration, s conversion.Scope) error {
out.MasqueradeBit = (*int32)(unsafe.Pointer(in.MasqueradeBit))
out.MasqueradeAll = in.MasqueradeAll
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
return nil
}
// Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration(in *KubeProxyIPTablesConfiguration, out *kubeproxyconfig.KubeProxyIPTablesConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration(in, out, s)
}
func autoConvert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in *kubeproxyconfig.KubeProxyIPTablesConfiguration, out *KubeProxyIPTablesConfiguration, s conversion.Scope) error {
out.MasqueradeBit = (*int32)(unsafe.Pointer(in.MasqueradeBit))
out.MasqueradeAll = in.MasqueradeAll
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
return nil
}
// Convert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration is an autogenerated conversion function.
func Convert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in *kubeproxyconfig.KubeProxyIPTablesConfiguration, out *KubeProxyIPTablesConfiguration, s conversion.Scope) error {
return autoConvert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in, out, s)
}
func autoConvert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration(in *KubeProxyIPVSConfiguration, out *kubeproxyconfig.KubeProxyIPVSConfiguration, s conversion.Scope) error {
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
out.Scheduler = in.Scheduler
out.ExcludeCIDRs = *(*[]string)(unsafe.Pointer(&in.ExcludeCIDRs))
return nil
}
// Convert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration(in *KubeProxyIPVSConfiguration, out *kubeproxyconfig.KubeProxyIPVSConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration(in, out, s)
}
func autoConvert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in *kubeproxyconfig.KubeProxyIPVSConfiguration, out *KubeProxyIPVSConfiguration, s conversion.Scope) error {
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
out.Scheduler = in.Scheduler
out.ExcludeCIDRs = *(*[]string)(unsafe.Pointer(&in.ExcludeCIDRs))
return nil
}
// Convert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration is an autogenerated conversion function.
func Convert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in *kubeproxyconfig.KubeProxyIPVSConfiguration, out *KubeProxyIPVSConfiguration, s conversion.Scope) error {
return autoConvert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in, out, s)
}
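
// A hedged sketch, not part of the generated file: invoking one generated conversion
// directly. Passing a nil conversion.Scope is an assumption that holds here only
// because these particular functions never dereference it.
func exampleConvertToInternal(ext *KubeProxyConfiguration) (*kubeproxyconfig.KubeProxyConfiguration, error) {
	var internal kubeproxyconfig.KubeProxyConfiguration
	if err := Convert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration(ext, &internal, nil); err != nil {
		return nil, err
	}
	return &internal, nil
}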

View File

@ -1,45 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["validation.go"],
importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation",
deps = [
"//pkg/apis/core/validation:go_default_library",
"//pkg/proxy/apis/kubeproxyconfig:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["validation_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/proxy/apis/kubeproxyconfig:go_default_library",
"//pkg/util/pointer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
],
)

View File

@ -1,227 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package kubeproxyconfig
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClientConnectionConfiguration) DeepCopyInto(out *ClientConnectionConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionConfiguration.
func (in *ClientConnectionConfiguration) DeepCopy() *ClientConnectionConfiguration {
if in == nil {
return nil
}
out := new(ClientConnectionConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ConfigurationMap) DeepCopyInto(out *ConfigurationMap) {
{
in := &in
*out = make(ConfigurationMap, len(*in))
for key, val := range *in {
(*out)[key] = val
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationMap.
func (in ConfigurationMap) DeepCopy() ConfigurationMap {
if in == nil {
return nil
}
out := new(ConfigurationMap)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.FeatureGates != nil {
in, out := &in.FeatureGates, &out.FeatureGates
*out = make(map[string]bool, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.ClientConnection = in.ClientConnection
in.IPTables.DeepCopyInto(&out.IPTables)
in.IPVS.DeepCopyInto(&out.IPVS)
if in.OOMScoreAdj != nil {
in, out := &in.OOMScoreAdj, &out.OOMScoreAdj
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
out.UDPIdleTimeout = in.UDPIdleTimeout
in.Conntrack.DeepCopyInto(&out.Conntrack)
out.ConfigSyncPeriod = in.ConfigSyncPeriod
if in.NodePortAddresses != nil {
in, out := &in.NodePortAddresses, &out.NodePortAddresses
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfiguration.
func (in *KubeProxyConfiguration) DeepCopy() *KubeProxyConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KubeProxyConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyConntrackConfiguration) DeepCopyInto(out *KubeProxyConntrackConfiguration) {
*out = *in
if in.Max != nil {
in, out := &in.Max, &out.Max
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
if in.MaxPerCore != nil {
in, out := &in.MaxPerCore, &out.MaxPerCore
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
if in.Min != nil {
in, out := &in.Min, &out.Min
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
if in.TCPEstablishedTimeout != nil {
in, out := &in.TCPEstablishedTimeout, &out.TCPEstablishedTimeout
if *in == nil {
*out = nil
} else {
*out = new(v1.Duration)
**out = **in
}
}
if in.TCPCloseWaitTimeout != nil {
in, out := &in.TCPCloseWaitTimeout, &out.TCPCloseWaitTimeout
if *in == nil {
*out = nil
} else {
*out = new(v1.Duration)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConntrackConfiguration.
func (in *KubeProxyConntrackConfiguration) DeepCopy() *KubeProxyConntrackConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyConntrackConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyIPTablesConfiguration) DeepCopyInto(out *KubeProxyIPTablesConfiguration) {
*out = *in
if in.MasqueradeBit != nil {
in, out := &in.MasqueradeBit, &out.MasqueradeBit
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPTablesConfiguration.
func (in *KubeProxyIPTablesConfiguration) DeepCopy() *KubeProxyIPTablesConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyIPTablesConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyIPVSConfiguration) DeepCopyInto(out *KubeProxyIPVSConfiguration) {
*out = *in
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
if in.ExcludeCIDRs != nil {
in, out := &in.ExcludeCIDRs, &out.ExcludeCIDRs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPVSConfiguration.
func (in *KubeProxyIPVSConfiguration) DeepCopy() *KubeProxyIPVSConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyIPVSConfiguration)
in.DeepCopyInto(out)
return out
}
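
// A small sketch, not part of the generated file: DeepCopy exists so that mutating a
// copy cannot alias the original's reference fields (slices, maps, pointers).
func exampleDeepCopyIsolation(in *KubeProxyConfiguration) *KubeProxyConfiguration {
	out := in.DeepCopy()
	out.NodePortAddresses = append(out.NodePortAddresses, "10.0.0.0/8") // leaves in untouched
	return out
}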

View File

@ -14,13 +14,13 @@ go_library(
],
importpath = "k8s.io/kubernetes/pkg/proxy/config",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/client/informers/informers_generated/internalversion/core/internalversion:go_default_library",
"//pkg/client/listers/core/internalversion:go_default_library",
"//pkg/controller:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -32,14 +32,14 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/client/clientset_generated/internalclientset/fake:go_default_library",
"//pkg/client/informers/informers_generated/internalversion:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
],
)

View File

@ -22,25 +22,25 @@ import (
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
informers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
ktesting "k8s.io/client-go/testing"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
)
func TestNewServicesSourceApi_UpdatesAndMultipleServices(t *testing.T) {
service1v1 := &api.Service{
service1v1 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "s1"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}}}
service1v2 := &api.Service{
Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Protocol: "TCP", Port: 10}}}}
service1v2 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "s1"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 20}}}}
service2 := &api.Service{
Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Protocol: "TCP", Port: 20}}}}
service2 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "s2"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 30}}}}
Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Protocol: "TCP", Port: 30}}}}
// Setup fake api client.
client := fake.NewSimpleClientset()
@ -54,59 +54,59 @@ func TestNewServicesSourceApi_UpdatesAndMultipleServices(t *testing.T) {
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
serviceConfig := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), time.Minute)
serviceConfig := NewServiceConfig(sharedInformers.Core().V1().Services(), time.Minute)
serviceConfig.RegisterEventHandler(handler)
go sharedInformers.Start(stopCh)
go serviceConfig.Run(stopCh)
// Add the first service
fakeWatch.Add(service1v1)
handler.ValidateServices(t, []*api.Service{service1v1})
handler.ValidateServices(t, []*v1.Service{service1v1})
// Add another service
fakeWatch.Add(service2)
handler.ValidateServices(t, []*api.Service{service1v1, service2})
handler.ValidateServices(t, []*v1.Service{service1v1, service2})
// Modify service1
fakeWatch.Modify(service1v2)
handler.ValidateServices(t, []*api.Service{service1v2, service2})
handler.ValidateServices(t, []*v1.Service{service1v2, service2})
// Delete service1
fakeWatch.Delete(service1v2)
handler.ValidateServices(t, []*api.Service{service2})
handler.ValidateServices(t, []*v1.Service{service2})
// Delete service2
fakeWatch.Delete(service2)
handler.ValidateServices(t, []*api.Service{})
handler.ValidateServices(t, []*v1.Service{})
}
func TestNewEndpointsSourceApi_UpdatesAndMultipleEndpoints(t *testing.T) {
endpoints1v1 := &api.Endpoints{
endpoints1v1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "e1"},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{
{IP: "1.2.3.4"},
},
Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
Ports: []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
}},
}
endpoints1v2 := &api.Endpoints{
endpoints1v2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "e1"},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{
{IP: "1.2.3.4"},
{IP: "4.3.2.1"},
},
Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
Ports: []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
}},
}
endpoints2 := &api.Endpoints{
endpoints2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "e2"},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{
{IP: "5.6.7.8"},
},
Ports: []api.EndpointPort{{Port: 80, Protocol: "TCP"}},
Ports: []v1.EndpointPort{{Port: 80, Protocol: "TCP"}},
}},
}
@ -122,37 +122,37 @@ func TestNewEndpointsSourceApi_UpdatesAndMultipleEndpoints(t *testing.T) {
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
endpointsConfig := NewEndpointsConfig(sharedInformers.Core().InternalVersion().Endpoints(), time.Minute)
endpointsConfig := NewEndpointsConfig(sharedInformers.Core().V1().Endpoints(), time.Minute)
endpointsConfig.RegisterEventHandler(handler)
go sharedInformers.Start(stopCh)
go endpointsConfig.Run(stopCh)
// Add the first endpoints
fakeWatch.Add(endpoints1v1)
handler.ValidateEndpoints(t, []*api.Endpoints{endpoints1v1})
handler.ValidateEndpoints(t, []*v1.Endpoints{endpoints1v1})
// Add another endpoints
fakeWatch.Add(endpoints2)
handler.ValidateEndpoints(t, []*api.Endpoints{endpoints1v1, endpoints2})
handler.ValidateEndpoints(t, []*v1.Endpoints{endpoints1v1, endpoints2})
// Modify endpoints1
fakeWatch.Modify(endpoints1v2)
handler.ValidateEndpoints(t, []*api.Endpoints{endpoints1v2, endpoints2})
handler.ValidateEndpoints(t, []*v1.Endpoints{endpoints1v2, endpoints2})
// Delete endpoints1
fakeWatch.Delete(endpoints1v2)
handler.ValidateEndpoints(t, []*api.Endpoints{endpoints2})
handler.ValidateEndpoints(t, []*v1.Endpoints{endpoints2})
// Delete endpoints2
fakeWatch.Delete(endpoints2)
handler.ValidateEndpoints(t, []*api.Endpoints{})
handler.ValidateEndpoints(t, []*v1.Endpoints{})
}
func newSvcHandler(t *testing.T, svcs []*api.Service, done func()) ServiceHandler {
func newSvcHandler(t *testing.T, svcs []*v1.Service, done func()) ServiceHandler {
shm := &ServiceHandlerMock{
state: make(map[types.NamespacedName]*api.Service),
state: make(map[types.NamespacedName]*v1.Service),
}
shm.process = func(services []*api.Service) {
shm.process = func(services []*v1.Service) {
defer done()
if !reflect.DeepEqual(services, svcs) {
t.Errorf("Unexpected services: %#v, expected: %#v", services, svcs)
@ -161,11 +161,11 @@ func newSvcHandler(t *testing.T, svcs []*api.Service, done func()) ServiceHandle
return shm
}
func newEpsHandler(t *testing.T, eps []*api.Endpoints, done func()) EndpointsHandler {
func newEpsHandler(t *testing.T, eps []*v1.Endpoints, done func()) EndpointsHandler {
ehm := &EndpointsHandlerMock{
state: make(map[types.NamespacedName]*api.Endpoints),
state: make(map[types.NamespacedName]*v1.Endpoints),
}
ehm.process = func(endpoints []*api.Endpoints) {
ehm.process = func(endpoints []*v1.Endpoints) {
defer done()
if !reflect.DeepEqual(eps, endpoints) {
t.Errorf("Unexpected endpoints: %#v, expected: %#v", endpoints, eps)
@ -175,18 +175,18 @@ func newEpsHandler(t *testing.T, eps []*api.Endpoints, done func()) EndpointsHan
}
func TestInitialSync(t *testing.T) {
svc1 := &api.Service{
svc1 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Protocol: "TCP", Port: 10}}},
}
svc2 := &api.Service{
svc2 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Protocol: "TCP", Port: 10}}},
}
eps1 := &api.Endpoints{
eps1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
}
eps2 := &api.Endpoints{
eps2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
}
@ -198,11 +198,11 @@ func TestInitialSync(t *testing.T) {
client := fake.NewSimpleClientset(svc1, svc2, eps2, eps1)
sharedInformers := informers.NewSharedInformerFactory(client, 0)
svcConfig := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), 0)
epsConfig := NewEndpointsConfig(sharedInformers.Core().InternalVersion().Endpoints(), 0)
svcHandler := newSvcHandler(t, []*api.Service{svc2, svc1}, wg.Done)
svcConfig := NewServiceConfig(sharedInformers.Core().V1().Services(), 0)
epsConfig := NewEndpointsConfig(sharedInformers.Core().V1().Endpoints(), 0)
svcHandler := newSvcHandler(t, []*v1.Service{svc2, svc1}, wg.Done)
svcConfig.RegisterEventHandler(svcHandler)
epsHandler := newEpsHandler(t, []*api.Endpoints{eps2, eps1}, wg.Done)
epsHandler := newEpsHandler(t, []*v1.Endpoints{eps2, eps1}, wg.Done)
epsConfig.RegisterEventHandler(epsHandler)
stopCh := make(chan struct{})

View File

@ -20,12 +20,12 @@ import (
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
coreinformers "k8s.io/client-go/informers/core/v1"
listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
api "k8s.io/kubernetes/pkg/apis/core"
coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion"
listers "k8s.io/kubernetes/pkg/client/listers/core/internalversion"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller"
)
@ -34,13 +34,13 @@ import (
type ServiceHandler interface {
// OnServiceAdd is called whenever creation of new service object
// is observed.
OnServiceAdd(service *api.Service)
OnServiceAdd(service *v1.Service)
// OnServiceUpdate is called whenever modification of an existing
// service object is observed.
OnServiceUpdate(oldService, service *api.Service)
OnServiceUpdate(oldService, service *v1.Service)
// OnServiceDelete is called whenever deletion of an existing service
// object is observed.
OnServiceDelete(service *api.Service)
OnServiceDelete(service *v1.Service)
// OnServiceSynced is called once all the initial event handlers were
// called and the state is fully propagated to local cache.
OnServiceSynced()
@ -51,13 +51,13 @@ type ServiceHandler interface {
type EndpointsHandler interface {
// OnEndpointsAdd is called whenever creation of new endpoints object
// is observed.
OnEndpointsAdd(endpoints *api.Endpoints)
OnEndpointsAdd(endpoints *v1.Endpoints)
// OnEndpointsUpdate is called whenever modification of an existing
// endpoints object is observed.
OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints)
OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints)
// OnEndpointsDelete is called whenever deletion of an existing endpoints
// object is observed.
OnEndpointsDelete(endpoints *api.Endpoints)
OnEndpointsDelete(endpoints *v1.Endpoints)
// OnEndpointsSynced is called once all the initial event handlers were
// called and the state is fully propagated to local cache.
OnEndpointsSynced()
@ -99,15 +99,15 @@ func (c *EndpointsConfig) RegisterEventHandler(handler EndpointsHandler) {
func (c *EndpointsConfig) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.Info("Starting endpoints config controller")
defer glog.Info("Shutting down endpoints config controller")
klog.Info("Starting endpoints config controller")
defer klog.Info("Shutting down endpoints config controller")
if !controller.WaitForCacheSync("endpoints config", stopCh, c.listerSynced) {
return
}
for i := range c.eventHandlers {
glog.V(3).Infof("Calling handler.OnEndpointsSynced()")
klog.V(3).Infof("Calling handler.OnEndpointsSynced()")
c.eventHandlers[i].OnEndpointsSynced()
}
@ -115,49 +115,49 @@ func (c *EndpointsConfig) Run(stopCh <-chan struct{}) {
}
func (c *EndpointsConfig) handleAddEndpoints(obj interface{}) {
endpoints, ok := obj.(*api.Endpoints)
endpoints, ok := obj.(*v1.Endpoints)
if !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
return
}
for i := range c.eventHandlers {
glog.V(4).Infof("Calling handler.OnEndpointsAdd")
klog.V(4).Infof("Calling handler.OnEndpointsAdd")
c.eventHandlers[i].OnEndpointsAdd(endpoints)
}
}
func (c *EndpointsConfig) handleUpdateEndpoints(oldObj, newObj interface{}) {
oldEndpoints, ok := oldObj.(*api.Endpoints)
oldEndpoints, ok := oldObj.(*v1.Endpoints)
if !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", oldObj))
return
}
endpoints, ok := newObj.(*api.Endpoints)
endpoints, ok := newObj.(*v1.Endpoints)
if !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", newObj))
return
}
for i := range c.eventHandlers {
glog.V(4).Infof("Calling handler.OnEndpointsUpdate")
klog.V(4).Infof("Calling handler.OnEndpointsUpdate")
c.eventHandlers[i].OnEndpointsUpdate(oldEndpoints, endpoints)
}
}
func (c *EndpointsConfig) handleDeleteEndpoints(obj interface{}) {
endpoints, ok := obj.(*api.Endpoints)
endpoints, ok := obj.(*v1.Endpoints)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
return
}
if endpoints, ok = tombstone.Obj.(*api.Endpoints); !ok {
if endpoints, ok = tombstone.Obj.(*v1.Endpoints); !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
return
}
}
for i := range c.eventHandlers {
glog.V(4).Infof("Calling handler.OnEndpointsDelete")
klog.V(4).Infof("Calling handler.OnEndpointsDelete")
c.eventHandlers[i].OnEndpointsDelete(endpoints)
}
}
@ -199,15 +199,15 @@ func (c *ServiceConfig) RegisterEventHandler(handler ServiceHandler) {
func (c *ServiceConfig) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.Info("Starting service config controller")
defer glog.Info("Shutting down service config controller")
klog.Info("Starting service config controller")
defer klog.Info("Shutting down service config controller")
if !controller.WaitForCacheSync("service config", stopCh, c.listerSynced) {
return
}
for i := range c.eventHandlers {
glog.V(3).Infof("Calling handler.OnServiceSynced()")
klog.V(3).Info("Calling handler.OnServiceSynced()")
c.eventHandlers[i].OnServiceSynced()
}
@ -215,49 +215,49 @@ func (c *ServiceConfig) Run(stopCh <-chan struct{}) {
}
func (c *ServiceConfig) handleAddService(obj interface{}) {
service, ok := obj.(*api.Service)
service, ok := obj.(*v1.Service)
if !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
return
}
for i := range c.eventHandlers {
glog.V(4).Infof("Calling handler.OnServiceAdd")
klog.V(4).Info("Calling handler.OnServiceAdd")
c.eventHandlers[i].OnServiceAdd(service)
}
}
func (c *ServiceConfig) handleUpdateService(oldObj, newObj interface{}) {
oldService, ok := oldObj.(*api.Service)
oldService, ok := oldObj.(*v1.Service)
if !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", oldObj))
return
}
service, ok := newObj.(*api.Service)
service, ok := newObj.(*v1.Service)
if !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", newObj))
return
}
for i := range c.eventHandlers {
glog.V(4).Infof("Calling handler.OnServiceUpdate")
klog.V(4).Info("Calling handler.OnServiceUpdate")
c.eventHandlers[i].OnServiceUpdate(oldService, service)
}
}
func (c *ServiceConfig) handleDeleteService(obj interface{}) {
service, ok := obj.(*api.Service)
service, ok := obj.(*v1.Service)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
return
}
if service, ok = tombstone.Obj.(*api.Service); !ok {
if service, ok = tombstone.Obj.(*v1.Service); !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
return
}
}
for i := range c.eventHandlers {
glog.V(4).Infof("Calling handler.OnServiceDelete")
klog.V(4).Info("Calling handler.OnServiceDelete")
c.eventHandlers[i].OnServiceDelete(service)
}
}
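
// A wiring sketch, not part of this file, mirroring the updated tests in this commit.
// It assumes the "k8s.io/client-go/kubernetes" and "k8s.io/client-go/informers"
// imports plus "time"; the caller supplies the client, handler, and stop channel.
func exampleRunServiceConfig(client kubernetes.Interface, h ServiceHandler, stopCh <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(client, 15*time.Minute)
	serviceConfig := NewServiceConfig(factory.Core().V1().Services(), 15*time.Minute)
	serviceConfig.RegisterEventHandler(h)
	go factory.Start(stopCh)
	go serviceConfig.Run(stopCh)
}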

View File

@ -23,17 +23,17 @@ import (
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
informers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
ktesting "k8s.io/client-go/testing"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
)
type sortedServices []*api.Service
type sortedServices []*v1.Service
func (s sortedServices) Len() int {
return len(s)
@ -48,24 +48,24 @@ func (s sortedServices) Less(i, j int) bool {
type ServiceHandlerMock struct {
lock sync.Mutex
state map[types.NamespacedName]*api.Service
state map[types.NamespacedName]*v1.Service
synced bool
updated chan []*api.Service
process func([]*api.Service)
updated chan []*v1.Service
process func([]*v1.Service)
}
func NewServiceHandlerMock() *ServiceHandlerMock {
shm := &ServiceHandlerMock{
state: make(map[types.NamespacedName]*api.Service),
updated: make(chan []*api.Service, 5),
state: make(map[types.NamespacedName]*v1.Service),
updated: make(chan []*v1.Service, 5),
}
shm.process = func(services []*api.Service) {
shm.process = func(services []*v1.Service) {
shm.updated <- services
}
return shm
}
func (h *ServiceHandlerMock) OnServiceAdd(service *api.Service) {
func (h *ServiceHandlerMock) OnServiceAdd(service *v1.Service) {
h.lock.Lock()
defer h.lock.Unlock()
namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
@ -73,7 +73,7 @@ func (h *ServiceHandlerMock) OnServiceAdd(service *api.Service) {
h.sendServices()
}
func (h *ServiceHandlerMock) OnServiceUpdate(oldService, service *api.Service) {
func (h *ServiceHandlerMock) OnServiceUpdate(oldService, service *v1.Service) {
h.lock.Lock()
defer h.lock.Unlock()
namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
@ -81,7 +81,7 @@ func (h *ServiceHandlerMock) OnServiceUpdate(oldService, service *api.Service) {
h.sendServices()
}
func (h *ServiceHandlerMock) OnServiceDelete(service *api.Service) {
func (h *ServiceHandlerMock) OnServiceDelete(service *v1.Service) {
h.lock.Lock()
defer h.lock.Unlock()
namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
@ -100,7 +100,7 @@ func (h *ServiceHandlerMock) sendServices() {
if !h.synced {
return
}
services := make([]*api.Service, 0, len(h.state))
services := make([]*v1.Service, 0, len(h.state))
for _, svc := range h.state {
services = append(services, svc)
}
@ -108,11 +108,11 @@ func (h *ServiceHandlerMock) sendServices() {
h.process(services)
}
func (h *ServiceHandlerMock) ValidateServices(t *testing.T, expectedServices []*api.Service) {
func (h *ServiceHandlerMock) ValidateServices(t *testing.T, expectedServices []*v1.Service) {
// We might get 1 or more updates for N service updates, because we
// overwrite older snapshots of services from the producer goroutine
// if the consumer falls behind.
var services []*api.Service
var services []*v1.Service
for {
select {
case services = <-h.updated:
@ -128,7 +128,7 @@ func (h *ServiceHandlerMock) ValidateServices(t *testing.T, expectedServices []*
}
}
type sortedEndpoints []*api.Endpoints
type sortedEndpoints []*v1.Endpoints
func (s sortedEndpoints) Len() int {
return len(s)
@ -143,24 +143,24 @@ func (s sortedEndpoints) Less(i, j int) bool {
type EndpointsHandlerMock struct {
lock sync.Mutex
state map[types.NamespacedName]*api.Endpoints
state map[types.NamespacedName]*v1.Endpoints
synced bool
updated chan []*api.Endpoints
process func([]*api.Endpoints)
updated chan []*v1.Endpoints
process func([]*v1.Endpoints)
}
func NewEndpointsHandlerMock() *EndpointsHandlerMock {
ehm := &EndpointsHandlerMock{
state: make(map[types.NamespacedName]*api.Endpoints),
updated: make(chan []*api.Endpoints, 5),
state: make(map[types.NamespacedName]*v1.Endpoints),
updated: make(chan []*v1.Endpoints, 5),
}
ehm.process = func(endpoints []*api.Endpoints) {
ehm.process = func(endpoints []*v1.Endpoints) {
ehm.updated <- endpoints
}
return ehm
}
func (h *EndpointsHandlerMock) OnEndpointsAdd(endpoints *api.Endpoints) {
func (h *EndpointsHandlerMock) OnEndpointsAdd(endpoints *v1.Endpoints) {
h.lock.Lock()
defer h.lock.Unlock()
namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
@ -168,7 +168,7 @@ func (h *EndpointsHandlerMock) OnEndpointsAdd(endpoints *api.Endpoints) {
h.sendEndpoints()
}
func (h *EndpointsHandlerMock) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
func (h *EndpointsHandlerMock) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) {
h.lock.Lock()
defer h.lock.Unlock()
namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
@ -176,7 +176,7 @@ func (h *EndpointsHandlerMock) OnEndpointsUpdate(oldEndpoints, endpoints *api.En
h.sendEndpoints()
}
func (h *EndpointsHandlerMock) OnEndpointsDelete(endpoints *api.Endpoints) {
func (h *EndpointsHandlerMock) OnEndpointsDelete(endpoints *v1.Endpoints) {
h.lock.Lock()
defer h.lock.Unlock()
namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
@ -195,7 +195,7 @@ func (h *EndpointsHandlerMock) sendEndpoints() {
if !h.synced {
return
}
endpoints := make([]*api.Endpoints, 0, len(h.state))
endpoints := make([]*v1.Endpoints, 0, len(h.state))
for _, eps := range h.state {
endpoints = append(endpoints, eps)
}
@ -203,11 +203,11 @@ func (h *EndpointsHandlerMock) sendEndpoints() {
h.process(endpoints)
}
func (h *EndpointsHandlerMock) ValidateEndpoints(t *testing.T, expectedEndpoints []*api.Endpoints) {
func (h *EndpointsHandlerMock) ValidateEndpoints(t *testing.T, expectedEndpoints []*v1.Endpoints) {
// We might get 1 or more updates for N endpoint updates, because we
// overwrite older snapshots of endpoints from the producer goroutine
// if the consumer falls behind. Unit tests will hard-timeout in 5m.
var endpoints []*api.Endpoints
var endpoints []*v1.Endpoints
for {
select {
case endpoints = <-h.updated:
@ -233,18 +233,18 @@ func TestNewServiceAddedAndNotified(t *testing.T) {
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
config := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), time.Minute)
config := NewServiceConfig(sharedInformers.Core().V1().Services(), time.Minute)
handler := NewServiceHandlerMock()
config.RegisterEventHandler(handler)
go sharedInformers.Start(stopCh)
go config.Run(stopCh)
service := &api.Service{
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Protocol: "TCP", Port: 10}}},
}
fakeWatch.Add(service)
handler.ValidateServices(t, []*api.Service{service})
handler.ValidateServices(t, []*v1.Service{service})
}
func TestServiceAddedRemovedSetAndNotified(t *testing.T) {
@ -257,29 +257,29 @@ func TestServiceAddedRemovedSetAndNotified(t *testing.T) {
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
config := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), time.Minute)
config := NewServiceConfig(sharedInformers.Core().V1().Services(), time.Minute)
handler := NewServiceHandlerMock()
config.RegisterEventHandler(handler)
go sharedInformers.Start(stopCh)
go config.Run(stopCh)
service1 := &api.Service{
service1 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Protocol: "TCP", Port: 10}}},
}
fakeWatch.Add(service1)
handler.ValidateServices(t, []*api.Service{service1})
handler.ValidateServices(t, []*v1.Service{service1})
service2 := &api.Service{
service2 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 20}}},
Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Protocol: "TCP", Port: 20}}},
}
fakeWatch.Add(service2)
services := []*api.Service{service2, service1}
services := []*v1.Service{service2, service1}
handler.ValidateServices(t, services)
fakeWatch.Delete(service1)
services = []*api.Service{service2}
services = []*v1.Service{service2}
handler.ValidateServices(t, services)
}
@ -293,7 +293,7 @@ func TestNewServicesMultipleHandlersAddedAndNotified(t *testing.T) {
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
config := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), time.Minute)
config := NewServiceConfig(sharedInformers.Core().V1().Services(), time.Minute)
handler := NewServiceHandlerMock()
handler2 := NewServiceHandlerMock()
config.RegisterEventHandler(handler)
@ -301,18 +301,18 @@ func TestNewServicesMultipleHandlersAddedAndNotified(t *testing.T) {
go sharedInformers.Start(stopCh)
go config.Run(stopCh)
service1 := &api.Service{
service1 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Protocol: "TCP", Port: 10}}},
}
service2 := &api.Service{
service2 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 20}}},
Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Protocol: "TCP", Port: 20}}},
}
fakeWatch.Add(service1)
fakeWatch.Add(service2)
services := []*api.Service{service2, service1}
services := []*v1.Service{service2, service1}
handler.ValidateServices(t, services)
handler2.ValidateServices(t, services)
}
@ -327,7 +327,7 @@ func TestNewEndpointsMultipleHandlersAddedAndNotified(t *testing.T) {
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
config := NewEndpointsConfig(sharedInformers.Core().InternalVersion().Endpoints(), time.Minute)
config := NewEndpointsConfig(sharedInformers.Core().V1().Endpoints(), time.Minute)
handler := NewEndpointsHandlerMock()
handler2 := NewEndpointsHandlerMock()
config.RegisterEventHandler(handler)
@ -335,24 +335,24 @@ func TestNewEndpointsMultipleHandlersAddedAndNotified(t *testing.T) {
go sharedInformers.Start(stopCh)
go config.Run(stopCh)
endpoints1 := &api.Endpoints{
endpoints1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.1.1.1"}, {IP: "2.2.2.2"}},
Ports: []api.EndpointPort{{Port: 80}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "1.1.1.1"}, {IP: "2.2.2.2"}},
Ports: []v1.EndpointPort{{Port: 80}},
}},
}
endpoints2 := &api.Endpoints{
endpoints2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "3.3.3.3"}, {IP: "4.4.4.4"}},
Ports: []api.EndpointPort{{Port: 80}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "3.3.3.3"}, {IP: "4.4.4.4"}},
Ports: []v1.EndpointPort{{Port: 80}},
}},
}
fakeWatch.Add(endpoints1)
fakeWatch.Add(endpoints2)
endpoints := []*api.Endpoints{endpoints2, endpoints1}
endpoints := []*v1.Endpoints{endpoints2, endpoints1}
handler.ValidateEndpoints(t, endpoints)
handler2.ValidateEndpoints(t, endpoints)
}
@ -367,7 +367,7 @@ func TestNewEndpointsMultipleHandlersAddRemoveSetAndNotified(t *testing.T) {
sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)
config := NewEndpointsConfig(sharedInformers.Core().InternalVersion().Endpoints(), time.Minute)
config := NewEndpointsConfig(sharedInformers.Core().V1().Endpoints(), time.Minute)
handler := NewEndpointsHandlerMock()
handler2 := NewEndpointsHandlerMock()
config.RegisterEventHandler(handler)
@ -375,56 +375,56 @@ func TestNewEndpointsMultipleHandlersAddRemoveSetAndNotified(t *testing.T) {
go sharedInformers.Start(stopCh)
go config.Run(stopCh)
endpoints1 := &api.Endpoints{
endpoints1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.1.1.1"}, {IP: "2.2.2.2"}},
Ports: []api.EndpointPort{{Port: 80}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "1.1.1.1"}, {IP: "2.2.2.2"}},
Ports: []v1.EndpointPort{{Port: 80}},
}},
}
endpoints2 := &api.Endpoints{
endpoints2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "3.3.3.3"}, {IP: "4.4.4.4"}},
Ports: []api.EndpointPort{{Port: 80}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "3.3.3.3"}, {IP: "4.4.4.4"}},
Ports: []v1.EndpointPort{{Port: 80}},
}},
}
fakeWatch.Add(endpoints1)
fakeWatch.Add(endpoints2)
endpoints := []*api.Endpoints{endpoints2, endpoints1}
endpoints := []*v1.Endpoints{endpoints2, endpoints1}
handler.ValidateEndpoints(t, endpoints)
handler2.ValidateEndpoints(t, endpoints)
// Add one more
endpoints3 := &api.Endpoints{
endpoints3 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foobar"},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "5.5.5.5"}, {IP: "6.6.6.6"}},
Ports: []api.EndpointPort{{Port: 80}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "5.5.5.5"}, {IP: "6.6.6.6"}},
Ports: []v1.EndpointPort{{Port: 80}},
}},
}
fakeWatch.Add(endpoints3)
endpoints = []*api.Endpoints{endpoints2, endpoints1, endpoints3}
endpoints = []*v1.Endpoints{endpoints2, endpoints1, endpoints3}
handler.ValidateEndpoints(t, endpoints)
handler2.ValidateEndpoints(t, endpoints)
// Update the "foo" service with new endpoints
endpoints1v2 := &api.Endpoints{
endpoints1v2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "7.7.7.7"}},
Ports: []api.EndpointPort{{Port: 80}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "7.7.7.7"}},
Ports: []v1.EndpointPort{{Port: 80}},
}},
}
fakeWatch.Modify(endpoints1v2)
endpoints = []*api.Endpoints{endpoints2, endpoints1v2, endpoints3}
endpoints = []*v1.Endpoints{endpoints2, endpoints1v2, endpoints3}
handler.ValidateEndpoints(t, endpoints)
handler2.ValidateEndpoints(t, endpoints)
// Remove "bar" endpoints
fakeWatch.Delete(endpoints2)
endpoints = []*api.Endpoints{endpoints1v2, endpoints3}
endpoints = []*v1.Endpoints{endpoints1v2, endpoints3}
handler.ValidateEndpoints(t, endpoints)
handler2.ValidateEndpoints(t, endpoints)
}

View File

@ -22,12 +22,12 @@ import (
"strconv"
"sync"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
api "k8s.io/kubernetes/pkg/apis/core"
utilproxy "k8s.io/kubernetes/pkg/proxy/util"
utilnet "k8s.io/kubernetes/pkg/util/net"
)
@ -113,7 +113,7 @@ func NewEndpointChangeTracker(hostname string, makeEndpointInfo makeEndpointFunc
// - pass <oldEndpoints, endpoints> as the <previous, current> pair.
// Delete item
// - pass <endpoints, nil> as the <previous, current> pair.
func (ect *EndpointChangeTracker) Update(previous, current *api.Endpoints) bool {
func (ect *EndpointChangeTracker) Update(previous, current *v1.Endpoints) bool {
endpoints := current
if endpoints == nil {
endpoints = previous
@ -184,7 +184,7 @@ type EndpointsMap map[ServicePortName][]Endpoint
// This function is used for incremental updates of endpointsMap.
//
// NOTE: endpoints object should NOT be modified.
func (ect *EndpointChangeTracker) endpointsToEndpointsMap(endpoints *api.Endpoints) EndpointsMap {
func (ect *EndpointChangeTracker) endpointsToEndpointsMap(endpoints *v1.Endpoints) EndpointsMap {
if endpoints == nil {
return nil
}
@ -197,7 +197,7 @@ func (ect *EndpointChangeTracker) endpointsToEndpointsMap(endpoints *api.Endpoin
for i := range ss.Ports {
port := &ss.Ports[i]
if port.Port == 0 {
glog.Warningf("ignoring invalid endpoint port %s", port.Name)
klog.Warningf("ignoring invalid endpoint port %s", port.Name)
continue
}
svcPortName := ServicePortName{
@ -207,7 +207,7 @@ func (ect *EndpointChangeTracker) endpointsToEndpointsMap(endpoints *api.Endpoin
for i := range ss.Addresses {
addr := &ss.Addresses[i]
if addr.IP == "" {
glog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name)
klog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name)
continue
}
// Filter out the incorrect IP version case.
@ -226,12 +226,12 @@ func (ect *EndpointChangeTracker) endpointsToEndpointsMap(endpoints *api.Endpoin
endpointsMap[svcPortName] = append(endpointsMap[svcPortName], baseEndpointInfo)
}
}
if glog.V(3) {
if klog.V(3) {
newEPList := []string{}
for _, ep := range endpointsMap[svcPortName] {
newEPList = append(newEPList, ep.String())
}
glog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList)
klog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList)
}
}
}
@ -299,7 +299,7 @@ func detectStaleConnections(oldEndpointsMap, newEndpointsMap EndpointsMap, stale
}
}
if stale {
glog.V(4).Infof("Stale endpoint %v -> %v", svcPortName, ep.String())
klog.V(4).Infof("Stale endpoint %v -> %v", svcPortName, ep.String())
*staleEndpoints = append(*staleEndpoints, ServiceEndpoint{Endpoint: ep.String(), ServicePortName: svcPortName})
}
}

View File

@ -22,21 +22,21 @@ import (
"github.com/davecgh/go-spew/spew"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
api "k8s.io/kubernetes/pkg/apis/core"
)
func (proxier *FakeProxier) addEndpoints(endpoints *api.Endpoints) {
func (proxier *FakeProxier) addEndpoints(endpoints *v1.Endpoints) {
proxier.endpointsChanges.Update(nil, endpoints)
}
func (proxier *FakeProxier) updateEndpoints(oldEndpoints, endpoints *api.Endpoints) {
func (proxier *FakeProxier) updateEndpoints(oldEndpoints, endpoints *v1.Endpoints) {
proxier.endpointsChanges.Update(oldEndpoints, endpoints)
}
func (proxier *FakeProxier) deleteEndpoints(endpoints *api.Endpoints) {
func (proxier *FakeProxier) deleteEndpoints(endpoints *v1.Endpoints) {
proxier.endpointsChanges.Update(endpoints, nil)
}
@ -118,8 +118,8 @@ func TestGetLocalEndpointIPs(t *testing.T) {
}
}
func makeTestEndpoints(namespace, name string, eptFunc func(*api.Endpoints)) *api.Endpoints {
ept := &api.Endpoints{
func makeTestEndpoints(namespace, name string, eptFunc func(*v1.Endpoints)) *v1.Endpoints {
ept := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
@ -138,24 +138,24 @@ func TestEndpointsToEndpointsMap(t *testing.T) {
testCases := []struct {
desc string
newEndpoints *api.Endpoints
newEndpoints *v1.Endpoints
expected map[ServicePortName][]*BaseEndpointInfo
isIPv6Mode *bool
}{
{
desc: "nothing",
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {}),
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {}),
expected: map[ServicePortName][]*BaseEndpointInfo{},
},
{
desc: "no changes, unnamed port",
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "",
Port: 11,
}},
@ -170,13 +170,13 @@ func TestEndpointsToEndpointsMap(t *testing.T) {
},
{
desc: "no changes, named port",
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "port",
Port: 11,
}},
@ -191,13 +191,13 @@ func TestEndpointsToEndpointsMap(t *testing.T) {
},
{
desc: "new port",
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Port: 11,
}},
},
@ -211,20 +211,20 @@ func TestEndpointsToEndpointsMap(t *testing.T) {
},
{
desc: "remove port",
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {}),
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {}),
expected: map[ServicePortName][]*BaseEndpointInfo{},
},
{
desc: "new IP and port",
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}, {
IP: "2.2.2.2",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p1",
Port: 11,
}, {
@ -247,13 +247,13 @@ func TestEndpointsToEndpointsMap(t *testing.T) {
},
{
desc: "remove IP and port",
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p1",
Port: 11,
}},
@ -268,13 +268,13 @@ func TestEndpointsToEndpointsMap(t *testing.T) {
},
{
desc: "rename port",
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p2",
Port: 11,
}},
@ -289,13 +289,13 @@ func TestEndpointsToEndpointsMap(t *testing.T) {
},
{
desc: "renumber port",
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p1",
Port: 22,
}},
@ -310,15 +310,15 @@ func TestEndpointsToEndpointsMap(t *testing.T) {
},
{
desc: "should omit IPv6 address in IPv4 mode",
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}, {
IP: "2001:db8:85a3:0:0:8a2e:370:7334",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p1",
Port: 11,
}, {
@ -340,15 +340,15 @@ func TestEndpointsToEndpointsMap(t *testing.T) {
},
{
desc: "should omit IPv4 address in IPv6 mode",
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{
newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}, {
IP: "2001:db8:85a3:0:0:8a2e:370:7334",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p1",
Port: 11,
}, {
@ -396,84 +396,84 @@ func TestEndpointsToEndpointsMap(t *testing.T) {
func TestUpdateEndpointsMap(t *testing.T) {
var nodeName = testHostname
emptyEndpoint := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{}
emptyEndpoint := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{}
}
unnamedPort := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{
unnamedPort := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Port: 11,
}},
}}
}
unnamedPortLocal := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{
unnamedPortLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
NodeName: &nodeName,
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Port: 11,
}},
}}
}
namedPortLocal := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{
namedPortLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
NodeName: &nodeName,
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
}},
}}
}
namedPort := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{
namedPort := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
}},
}}
}
namedPortRenamed := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{
namedPortRenamed := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p11-2",
Port: 11,
}},
}}
}
namedPortRenumbered := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{
namedPortRenumbered := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 22,
}},
}}
}
namedPortsLocalNoLocal := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{
namedPortsLocalNoLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}, {
IP: "1.1.1.2",
NodeName: &nodeName,
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
}, {
@ -482,52 +482,52 @@ func TestUpdateEndpointsMap(t *testing.T) {
}},
}}
}
multipleSubsets := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{
multipleSubsets := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
}},
}, {
Addresses: []api.EndpointAddress{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.2",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p12",
Port: 12,
}},
}}
}
multipleSubsetsWithLocal := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{
multipleSubsetsWithLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
}},
}, {
Addresses: []api.EndpointAddress{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.2",
NodeName: &nodeName,
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p12",
Port: 12,
}},
}}
}
multipleSubsetsMultiplePortsLocal := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{
multipleSubsetsMultiplePortsLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
NodeName: &nodeName,
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
}, {
@ -535,24 +535,24 @@ func TestUpdateEndpointsMap(t *testing.T) {
Port: 12,
}},
}, {
Addresses: []api.EndpointAddress{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.3",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p13",
Port: 13,
}},
}}
}
multipleSubsetsIPsPorts1 := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{
multipleSubsetsIPsPorts1 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}, {
IP: "1.1.1.2",
NodeName: &nodeName,
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
}, {
@ -560,13 +560,13 @@ func TestUpdateEndpointsMap(t *testing.T) {
Port: 12,
}},
}, {
Addresses: []api.EndpointAddress{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.3",
}, {
IP: "1.1.1.4",
NodeName: &nodeName,
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p13",
Port: 13,
}, {
@ -575,15 +575,15 @@ func TestUpdateEndpointsMap(t *testing.T) {
}},
}}
}
multipleSubsetsIPsPorts2 := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{
multipleSubsetsIPsPorts2 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "2.2.2.1",
}, {
IP: "2.2.2.2",
NodeName: &nodeName,
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p21",
Port: 21,
}, {
@ -592,81 +592,81 @@ func TestUpdateEndpointsMap(t *testing.T) {
}},
}}
}
complexBefore1 := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{
complexBefore1 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
}},
}}
}
complexBefore2 := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{
complexBefore2 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "2.2.2.2",
NodeName: &nodeName,
}, {
IP: "2.2.2.22",
NodeName: &nodeName,
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p22",
Port: 22,
}},
}, {
Addresses: []api.EndpointAddress{{
Addresses: []v1.EndpointAddress{{
IP: "2.2.2.3",
NodeName: &nodeName,
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p23",
Port: 23,
}},
}}
}
complexBefore4 := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{
complexBefore4 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "4.4.4.4",
NodeName: &nodeName,
}, {
IP: "4.4.4.5",
NodeName: &nodeName,
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p44",
Port: 44,
}},
}, {
Addresses: []api.EndpointAddress{{
Addresses: []v1.EndpointAddress{{
IP: "4.4.4.6",
NodeName: &nodeName,
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p45",
Port: 45,
}},
}}
}
complexAfter1 := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{
complexAfter1 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}, {
IP: "1.1.1.11",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
}},
}, {
Addresses: []api.EndpointAddress{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.2",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p12",
Port: 12,
}, {
@ -675,24 +675,24 @@ func TestUpdateEndpointsMap(t *testing.T) {
}},
}}
}
complexAfter3 := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{
complexAfter3 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "3.3.3.3",
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p33",
Port: 33,
}},
}}
}
complexAfter4 := func(ept *api.Endpoints) {
ept.Subsets = []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{
complexAfter4 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "4.4.4.4",
NodeName: &nodeName,
}},
Ports: []api.EndpointPort{{
Ports: []v1.EndpointPort{{
Name: "p44",
Port: 44,
}},
@ -703,8 +703,8 @@ func TestUpdateEndpointsMap(t *testing.T) {
// previousEndpoints and currentEndpoints are used to call appropriate
// handlers OnEndpoints* (based on whether corresponding values are nil
// or non-nil) and must be of equal length.
previousEndpoints []*api.Endpoints
currentEndpoints []*api.Endpoints
previousEndpoints []*v1.Endpoints
currentEndpoints []*v1.Endpoints
oldEndpoints map[ServicePortName][]*BaseEndpointInfo
expectedResult map[ServicePortName][]*BaseEndpointInfo
expectedStaleEndpoints []ServiceEndpoint
@ -719,10 +719,10 @@ func TestUpdateEndpointsMap(t *testing.T) {
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[1]: no change, unnamed port
previousEndpoints: []*api.Endpoints{
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPort),
},
currentEndpoints: []*api.Endpoints{
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPort),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
@ -740,10 +740,10 @@ func TestUpdateEndpointsMap(t *testing.T) {
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[2]: no change, named port, local
previousEndpoints: []*api.Endpoints{
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortLocal),
},
currentEndpoints: []*api.Endpoints{
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortLocal),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
@ -763,10 +763,10 @@ func TestUpdateEndpointsMap(t *testing.T) {
},
}, {
// Case[3]: no change, multiple subsets
previousEndpoints: []*api.Endpoints{
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsets),
},
currentEndpoints: []*api.Endpoints{
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsets),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
@ -790,10 +790,10 @@ func TestUpdateEndpointsMap(t *testing.T) {
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[4]: no change, multiple subsets, multiple ports, local
previousEndpoints: []*api.Endpoints{
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsMultiplePortsLocal),
},
currentEndpoints: []*api.Endpoints{
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsMultiplePortsLocal),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
@ -825,11 +825,11 @@ func TestUpdateEndpointsMap(t *testing.T) {
},
}, {
// Case[5]: no change, multiple endpoints, subsets, IPs, and ports
previousEndpoints: []*api.Endpoints{
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsIPsPorts1),
makeTestEndpoints("ns2", "ep2", multipleSubsetsIPsPorts2),
},
currentEndpoints: []*api.Endpoints{
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsIPsPorts1),
makeTestEndpoints("ns2", "ep2", multipleSubsetsIPsPorts2),
},
@ -893,10 +893,10 @@ func TestUpdateEndpointsMap(t *testing.T) {
},
}, {
// Case[6]: add an Endpoints
previousEndpoints: []*api.Endpoints{
previousEndpoints: []*v1.Endpoints{
nil,
},
currentEndpoints: []*api.Endpoints{
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPortLocal),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{},
@ -914,10 +914,10 @@ func TestUpdateEndpointsMap(t *testing.T) {
},
}, {
// Case[7]: remove an Endpoints
previousEndpoints: []*api.Endpoints{
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPortLocal),
},
currentEndpoints: []*api.Endpoints{
currentEndpoints: []*v1.Endpoints{
nil,
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
@ -934,10 +934,10 @@ func TestUpdateEndpointsMap(t *testing.T) {
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[8]: add an IP and port
previousEndpoints: []*api.Endpoints{
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
currentEndpoints: []*api.Endpoints{
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortsLocalNoLocal),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
@ -964,10 +964,10 @@ func TestUpdateEndpointsMap(t *testing.T) {
},
}, {
// Case[9]: remove an IP and port
previousEndpoints: []*api.Endpoints{
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortsLocalNoLocal),
},
currentEndpoints: []*api.Endpoints{
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
@ -999,10 +999,10 @@ func TestUpdateEndpointsMap(t *testing.T) {
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[10]: add a subset
previousEndpoints: []*api.Endpoints{
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
currentEndpoints: []*api.Endpoints{
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsWithLocal),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
@ -1027,10 +1027,10 @@ func TestUpdateEndpointsMap(t *testing.T) {
},
}, {
// Case[11]: remove a subset
previousEndpoints: []*api.Endpoints{
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsets),
},
currentEndpoints: []*api.Endpoints{
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
@ -1054,10 +1054,10 @@ func TestUpdateEndpointsMap(t *testing.T) {
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[12]: rename a port
previousEndpoints: []*api.Endpoints{
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
currentEndpoints: []*api.Endpoints{
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortRenamed),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
@ -1080,10 +1080,10 @@ func TestUpdateEndpointsMap(t *testing.T) {
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[13]: renumber a port
previousEndpoints: []*api.Endpoints{
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
currentEndpoints: []*api.Endpoints{
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortRenumbered),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
@ -1104,13 +1104,13 @@ func TestUpdateEndpointsMap(t *testing.T) {
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
// Case[14]: complex add and remove
previousEndpoints: []*api.Endpoints{
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", complexBefore1),
makeTestEndpoints("ns2", "ep2", complexBefore2),
nil,
makeTestEndpoints("ns4", "ep4", complexBefore4),
},
currentEndpoints: []*api.Endpoints{
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", complexAfter1),
nil,
makeTestEndpoints("ns3", "ep3", complexAfter3),
@ -1179,10 +1179,10 @@ func TestUpdateEndpointsMap(t *testing.T) {
},
}, {
// Case[15]: change from 0 endpoint address to 1 unnamed port
previousEndpoints: []*api.Endpoints{
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", emptyEndpoint),
},
currentEndpoints: []*api.Endpoints{
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPort),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{},

View File

@ -15,13 +15,13 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/proxy/healthcheck",
deps = [
"//pkg/apis/core:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//vendor/github.com/renstrom/dedent:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -30,10 +30,10 @@ go_test(
srcs = ["healthcheck_test.go"],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)

View File

@ -25,8 +25,8 @@ import (
"sync/atomic"
"time"
"github.com/golang/glog"
"github.com/renstrom/dedent"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
@ -133,9 +133,9 @@ func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) err
// Remove any that are not needed any more.
for nsn, svc := range hcs.services {
if port, found := newServices[nsn]; !found || port != svc.port {
glog.V(2).Infof("Closing healthcheck %q on port %d", nsn.String(), svc.port)
klog.V(2).Infof("Closing healthcheck %q on port %d", nsn.String(), svc.port)
if err := svc.listener.Close(); err != nil {
glog.Errorf("Close(%v): %v", svc.listener.Addr(), err)
klog.Errorf("Close(%v): %v", svc.listener.Addr(), err)
}
delete(hcs.services, nsn)
}
@ -144,11 +144,11 @@ func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) err
// Add any that are needed.
for nsn, port := range newServices {
if hcs.services[nsn] != nil {
glog.V(3).Infof("Existing healthcheck %q on port %d", nsn.String(), port)
klog.V(3).Infof("Existing healthcheck %q on port %d", nsn.String(), port)
continue
}
glog.V(2).Infof("Opening healthcheck %q on port %d", nsn.String(), port)
klog.V(2).Infof("Opening healthcheck %q on port %d", nsn.String(), port)
svc := &hcInstance{port: port}
addr := fmt.Sprintf(":%d", port)
svc.server = hcs.httpFactory.New(addr, hcHandler{name: nsn, hcs: hcs})
@ -166,19 +166,19 @@ func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) err
UID: types.UID(nsn.String()),
}, api.EventTypeWarning, "FailedToStartServiceHealthcheck", msg)
}
glog.Error(msg)
klog.Error(msg)
continue
}
hcs.services[nsn] = svc
go func(nsn types.NamespacedName, svc *hcInstance) {
// Serve() will exit when the listener is closed.
glog.V(3).Infof("Starting goroutine for healthcheck %q on port %d", nsn.String(), svc.port)
klog.V(3).Infof("Starting goroutine for healthcheck %q on port %d", nsn.String(), svc.port)
if err := svc.server.Serve(svc.listener); err != nil {
glog.V(3).Infof("Healthcheck %q closed: %v", nsn.String(), err)
klog.V(3).Infof("Healthcheck %q closed: %v", nsn.String(), err)
return
}
glog.V(3).Infof("Healthcheck %q closed", nsn.String())
klog.V(3).Infof("Healthcheck %q closed", nsn.String())
}(nsn, svc)
}
return nil
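
SyncServices and its companion SyncEndpoints form a small reconcile contract: the caller declares which healthchecks should exist (by service name and port), then reports how many local endpoints each one has. A simplified in-memory sketch of that contract, with stand-in types; the real server also opens TCP listeners and serves HTTP per service:

package main

import "fmt"

type namespacedName struct{ Namespace, Name string }

type healthServer struct {
	services  map[namespacedName]uint16 // healthcheck port per service
	endpoints map[namespacedName]int    // local endpoint count per service
}

// syncServices removes healthchecks that are gone or whose port changed,
// then adds the ones that are newly wanted.
func (s *healthServer) syncServices(want map[namespacedName]uint16) {
	for nsn, port := range s.services {
		if p, ok := want[nsn]; !ok || p != port {
			delete(s.services, nsn) // the real server closes the listener here
			delete(s.endpoints, nsn)
		}
	}
	for nsn, port := range want {
		s.services[nsn] = port
	}
}

// syncEndpoints records counts only for known healthchecks; a healthcheck
// reports healthy only while its count is greater than zero.
func (s *healthServer) syncEndpoints(counts map[namespacedName]int) {
	for nsn, n := range counts {
		if _, ok := s.services[nsn]; ok {
			s.endpoints[nsn] = n
		}
	}
}

func main() {
	s := &healthServer{services: map[namespacedName]uint16{}, endpoints: map[namespacedName]int{}}
	svc := namespacedName{"ns1", "svc1"}
	s.syncServices(map[namespacedName]uint16{svc: 30301})
	s.syncEndpoints(map[namespacedName]int{svc: 2})
	fmt.Println(s.services, s.endpoints)
}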
@ -203,7 +203,7 @@ func (h hcHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
svc, ok := h.hcs.services[h.name]
if !ok || svc == nil {
h.hcs.lock.Unlock()
glog.Errorf("Received request for closed healthcheck %q", h.name.String())
klog.Errorf("Received request for closed healthcheck %q", h.name.String())
return
}
count := svc.endpoints
@ -232,10 +232,10 @@ func (hcs *server) SyncEndpoints(newEndpoints map[types.NamespacedName]int) erro
for nsn, count := range newEndpoints {
if hcs.services[nsn] == nil {
glog.V(3).Infof("Not saving endpoints for unknown healthcheck %q", nsn.String())
klog.V(3).Infof("Not saving endpoints for unknown healthcheck %q", nsn.String())
continue
}
glog.V(3).Infof("Reporting %d endpoints for healthcheck %q", count, nsn.String())
klog.V(3).Infof("Reporting %d endpoints for healthcheck %q", count, nsn.String())
hcs.services[nsn].endpoints = count
}
for nsn, hci := range hcs.services {
@ -306,7 +306,7 @@ func (hs *HealthzServer) Run() {
server := hs.httpFactory.New(hs.addr, serveMux)
go wait.Until(func() {
glog.V(3).Infof("Starting goroutine for healthz on %s", hs.addr)
klog.V(3).Infof("Starting goroutine for healthz on %s", hs.addr)
listener, err := hs.listener.Listen(hs.addr)
if err != nil {
@ -314,15 +314,15 @@ func (hs *HealthzServer) Run() {
if hs.recorder != nil {
hs.recorder.Eventf(hs.nodeRef, api.EventTypeWarning, "FailedToStartNodeHealthcheck", msg)
}
glog.Error(msg)
klog.Error(msg)
return
}
if err := server.Serve(listener); err != nil {
glog.Errorf("Healthz closed with error: %v", err)
klog.Errorf("Healthz closed with error: %v", err)
return
}
glog.Errorf("Unexpected healthz closed.")
klog.Error("Unexpected healthz closed.")
}, nodeHealthzRetryInterval, wait.NeverStop)
}
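
Run above relies on wait.Until to keep the healthz endpoint alive: if the listener or server fails, the closure returns and wait.Until re-invokes it after nodeHealthzRetryInterval. A self-contained sketch of that retry loop, using a minimal stand-in for wait.Until (the real one lives in k8s.io/apimachinery/pkg/util/wait):

package main

import (
	"fmt"
	"net"
	"time"
)

// until re-runs f every period until stop is closed; a minimal stand-in
// for wait.Until's behavior.
func until(f func(), period time.Duration, stop <-chan struct{}) {
	for {
		select {
		case <-stop:
			return
		default:
		}
		f()
		select {
		case <-stop:
			return
		case <-time.After(period):
		}
	}
}

func main() {
	stop := make(chan struct{})
	go until(func() {
		ln, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			fmt.Println("listen failed, will retry:", err)
			return // returning hands control back to until, which retries
		}
		defer ln.Close()
		fmt.Println("healthz listening on", ln.Addr())
		// A real server would block in Serve() here until it fails.
	}, time.Second, stop)
	time.Sleep(100 * time.Millisecond)
	close(stop)
}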

View File

@ -8,12 +8,9 @@ load(
go_library(
name = "go_default_library",
srcs = [
"proxier.go",
],
srcs = ["proxier.go"],
importpath = "k8s.io/kubernetes/pkg/proxy/iptables",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/healthcheck:go_default_library",
"//pkg/proxy/metrics:go_default_library",
@ -23,12 +20,12 @@ go_library(
"//pkg/util/iptables:go_default_library",
"//pkg/util/net:go_default_library",
"//pkg/util/sysctl:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
@ -38,7 +35,6 @@ go_test(
srcs = ["proxier_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/util:go_default_library",
"//pkg/proxy/util/testing:go_default_library",
@ -46,10 +42,11 @@ go_test(
"//pkg/util/conntrack:go_default_library",
"//pkg/util/iptables:go_default_library",
"//pkg/util/iptables/testing:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],

View File

@ -32,13 +32,13 @@ import (
"sync/atomic"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/proxy/healthcheck"
"k8s.io/kubernetes/pkg/proxy/metrics"
@ -48,7 +48,6 @@ import (
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilnet "k8s.io/kubernetes/pkg/util/net"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
utilversion "k8s.io/kubernetes/pkg/util/version"
utilexec "k8s.io/utils/exec"
)
@ -149,7 +148,7 @@ type serviceInfo struct {
}
// returns a new proxy.ServicePort which abstracts a serviceInfo
func newServiceInfo(port *api.ServicePort, service *api.Service, baseInfo *proxy.BaseServiceInfo) proxy.ServicePort {
func newServiceInfo(port *v1.ServicePort, service *v1.Service, baseInfo *proxy.BaseServiceInfo) proxy.ServicePort {
info := &serviceInfo{BaseServiceInfo: baseInfo}
// Store the following for performance reasons.
@ -183,7 +182,7 @@ func newEndpointInfo(baseInfo *proxy.BaseEndpointInfo) proxy.Endpoint {
func (e *endpointsInfo) Equal(other proxy.Endpoint) bool {
o, ok := other.(*endpointsInfo)
if !ok {
glog.Errorf("Failed to cast endpointsInfo")
klog.Error("Failed to cast endpointsInfo")
return false
}
return e.Endpoint == o.Endpoint &&
@ -243,11 +242,18 @@ type Proxier struct {
// The following buffers are used to reuse memory and avoid allocations
// that are significantly impacting performance.
iptablesData *bytes.Buffer
filterChains *bytes.Buffer
filterRules *bytes.Buffer
natChains *bytes.Buffer
natRules *bytes.Buffer
iptablesData *bytes.Buffer
existingFilterChainsData *bytes.Buffer
filterChains *bytes.Buffer
filterRules *bytes.Buffer
natChains *bytes.Buffer
natRules *bytes.Buffer
// endpointChainsNumber is the total number of endpoint chains across all
// services that we will generate (it is computed at the beginning of the
// syncProxyRules method). If it is large enough, comments in some
// iptables rules are dropped to improve performance.
endpointChainsNumber int
// nodePortAddresses contains the values used to select the interfaces on which nodePort traffic is accepted.
nodePortAddresses []string
@ -287,15 +293,17 @@ func NewProxier(ipt utiliptables.Interface,
nodePortAddresses []string,
) (*Proxier, error) {
// Set the route_localnet sysctl we need for exposing NodePorts on loopback addresses.
if err := sysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil {
return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlRouteLocalnet, err)
if val, _ := sysctl.GetSysctl(sysctlRouteLocalnet); val != 1 {
if err := sysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil {
return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlRouteLocalnet, err)
}
}
// Proxy needs br_netfilter and bridge-nf-call-iptables=1 when containers
// are connected to a Linux bridge (but not SDN bridges). Until most
// plugins handle this, log when config is missing
if val, err := sysctl.GetSysctl(sysctlBridgeCallIPTables); err == nil && val != 1 {
glog.Warningf("missing br-netfilter module or unset sysctl br-nf-call-iptables; proxy may not work as intended")
klog.Warning("missing br-netfilter module or unset sysctl br-nf-call-iptables; proxy may not work as intended")
}
// Generate the masquerade mark to use for SNAT rules.
@ -303,12 +311,12 @@ func NewProxier(ipt utiliptables.Interface,
masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue)
if nodeIP == nil {
glog.Warningf("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP")
klog.Warning("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP")
nodeIP = net.ParseIP("127.0.0.1")
}
if len(clusterCIDR) == 0 {
glog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic")
klog.Warning("clusterCIDR not specified, unable to distinguish between internal and external traffic")
} else if utilnet.IsIPv6CIDR(clusterCIDR) != ipt.IsIpv6() {
return nil, fmt.Errorf("clusterCIDR %s has incorrect IP version: expect isIPv6=%t", clusterCIDR, ipt.IsIpv6())
}
@ -335,6 +343,7 @@ func NewProxier(ipt utiliptables.Interface,
healthzServer: healthzServer,
precomputedProbabilities: make([]string, 0, 1001),
iptablesData: bytes.NewBuffer(nil),
existingFilterChainsData: bytes.NewBuffer(nil),
filterChains: bytes.NewBuffer(nil),
filterRules: bytes.NewBuffer(nil),
natChains: bytes.NewBuffer(nil),
@ -343,7 +352,7 @@ func NewProxier(ipt utiliptables.Interface,
networkInterfacer: utilproxy.RealNetwork{},
}
burstSyncs := 2
glog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs)
klog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs)
proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs)
return proxier, nil
}
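
The BoundedFrequencyRunner constructed above is what lets OnServiceUpdate and friends call syncRunner.Run() freely: calls coalesce, the sync function never runs more often than minSyncPeriod, and it still runs at least every syncPeriod. A much-simplified sketch of the min-interval half of that behavior (the real runner, in pkg/util/async, also schedules a deferred run and honors burstSyncs):

package main

import (
	"fmt"
	"sync"
	"time"
)

type boundedRunner struct {
	mu          sync.Mutex
	fn          func()
	minInterval time.Duration
	lastRun     time.Time
}

// Run executes fn unless it ran too recently; the real runner would queue
// a deferred run rather than simply dropping the request.
func (r *boundedRunner) Run() {
	r.mu.Lock()
	defer r.mu.Unlock()
	if since := time.Since(r.lastRun); since < r.minInterval {
		fmt.Printf("deferring sync (%v since last run)\n", since.Round(time.Millisecond))
		return
	}
	r.lastRun = time.Now()
	r.fn()
}

func main() {
	r := &boundedRunner{fn: func() { fmt.Println("syncProxyRules()") }, minInterval: 50 * time.Millisecond}
	r.Run() // runs immediately
	r.Run() // suppressed: within minInterval
	time.Sleep(60 * time.Millisecond)
	r.Run() // runs again
}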
@ -383,7 +392,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
)
if err := ipt.DeleteRule(chain.table, chain.sourceChain, args...); err != nil {
if !utiliptables.IsNotFoundError(err) {
glog.Errorf("Error removing pure-iptables proxy rule: %v", err)
klog.Errorf("Error removing pure-iptables proxy rule: %v", err)
encounteredError = true
}
}
@ -392,7 +401,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
// Flush and remove all of our "-t nat" chains.
iptablesData := bytes.NewBuffer(nil)
if err := ipt.SaveInto(utiliptables.TableNAT, iptablesData); err != nil {
glog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableNAT, err)
klog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableNAT, err)
encounteredError = true
} else {
existingNATChains := utiliptables.GetChainLines(utiliptables.TableNAT, iptablesData.Bytes())
@ -403,16 +412,16 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain, KubeMarkMasqChain} {
if _, found := existingNATChains[chain]; found {
chainString := string(chain)
writeLine(natChains, existingNATChains[chain]) // flush
writeLine(natRules, "-X", chainString) // delete
writeBytesLine(natChains, existingNATChains[chain]) // flush
writeLine(natRules, "-X", chainString) // delete
}
}
// Hunt for service and endpoint chains.
for chain := range existingNATChains {
chainString := string(chain)
if strings.HasPrefix(chainString, "KUBE-SVC-") || strings.HasPrefix(chainString, "KUBE-SEP-") || strings.HasPrefix(chainString, "KUBE-FW-") || strings.HasPrefix(chainString, "KUBE-XLB-") {
writeLine(natChains, existingNATChains[chain]) // flush
writeLine(natRules, "-X", chainString) // delete
writeBytesLine(natChains, existingNATChains[chain]) // flush
writeLine(natRules, "-X", chainString) // delete
}
}
writeLine(natRules, "COMMIT")
@ -420,15 +429,15 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
// Write it.
err = ipt.Restore(utiliptables.TableNAT, natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters)
if err != nil {
glog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableNAT, err)
klog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableNAT, err)
encounteredError = true
}
}
// Flush and remove all of our "-t filter" chains.
iptablesData = bytes.NewBuffer(nil)
iptablesData.Reset()
if err := ipt.SaveInto(utiliptables.TableFilter, iptablesData); err != nil {
glog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableFilter, err)
klog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableFilter, err)
encounteredError = true
} else {
existingFilterChains := utiliptables.GetChainLines(utiliptables.TableFilter, iptablesData.Bytes())
@ -438,7 +447,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeExternalServicesChain, kubeForwardChain} {
if _, found := existingFilterChains[chain]; found {
chainString := string(chain)
writeLine(filterChains, existingFilterChains[chain])
writeBytesLine(filterChains, existingFilterChains[chain])
writeLine(filterRules, "-X", chainString)
}
}
@ -446,7 +455,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
filterLines := append(filterChains.Bytes(), filterRules.Bytes()...)
// Write it.
if err := ipt.Restore(utiliptables.TableFilter, filterLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters); err != nil {
glog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableFilter, err)
klog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableFilter, err)
encounteredError = true
}
}
@ -501,17 +510,17 @@ func (proxier *Proxier) isInitialized() bool {
return atomic.LoadInt32(&proxier.initialized) > 0
}
func (proxier *Proxier) OnServiceAdd(service *api.Service) {
func (proxier *Proxier) OnServiceAdd(service *v1.Service) {
proxier.OnServiceUpdate(nil, service)
}
func (proxier *Proxier) OnServiceUpdate(oldService, service *api.Service) {
func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) {
if proxier.serviceChanges.Update(oldService, service) && proxier.isInitialized() {
proxier.syncRunner.Run()
}
}
func (proxier *Proxier) OnServiceDelete(service *api.Service) {
func (proxier *Proxier) OnServiceDelete(service *v1.Service) {
proxier.OnServiceUpdate(service, nil)
}
@ -526,17 +535,17 @@ func (proxier *Proxier) OnServiceSynced() {
proxier.syncProxyRules()
}
func (proxier *Proxier) OnEndpointsAdd(endpoints *api.Endpoints) {
func (proxier *Proxier) OnEndpointsAdd(endpoints *v1.Endpoints) {
proxier.OnEndpointsUpdate(nil, endpoints)
}
func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) {
if proxier.endpointsChanges.Update(oldEndpoints, endpoints) && proxier.isInitialized() {
proxier.syncRunner.Run()
}
}
func (proxier *Proxier) OnEndpointsDelete(endpoints *api.Endpoints) {
func (proxier *Proxier) OnEndpointsDelete(endpoints *v1.Endpoints) {
proxier.OnEndpointsUpdate(endpoints, nil)
}
@ -596,16 +605,29 @@ func servicePortEndpointChainName(servicePortName string, protocol string, endpo
// TODO: move it to util
func (proxier *Proxier) deleteEndpointConnections(connectionMap []proxy.ServiceEndpoint) {
for _, epSvcPair := range connectionMap {
if svcInfo, ok := proxier.serviceMap[epSvcPair.ServicePortName]; ok && svcInfo.GetProtocol() == api.ProtocolUDP {
if svcInfo, ok := proxier.serviceMap[epSvcPair.ServicePortName]; ok && svcInfo.GetProtocol() == v1.ProtocolUDP {
endpointIP := utilproxy.IPPart(epSvcPair.Endpoint)
err := conntrack.ClearEntriesForNAT(proxier.exec, svcInfo.ClusterIPString(), endpointIP, v1.ProtocolUDP)
if err != nil {
glog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.ServicePortName.String(), err)
klog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.ServicePortName.String(), err)
}
}
}
}
const endpointChainsNumberThreshold = 1000
// Assumes proxier.mu is held.
func (proxier *Proxier) appendServiceCommentLocked(args []string, svcName string) []string {
// Omitting these comments can reduce the size of the iptables ruleset (with a
// large number of endpoints) by 40% or more. So if the total number of
// endpoint chains is large enough, we simply drop those comments.
// Note that append's result must be returned to the caller: args is a value
// parameter, so appending to it here would otherwise not be visible outside.
if proxier.endpointChainsNumber > endpointChainsNumberThreshold {
return args
}
return append(args, "-m", "comment", "--comment", svcName)
}
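
To make the effect concrete, here is a small standalone demonstration of the same decision: the identical balancing rule is emitted with and without the -m comment clause depending on the chain count. The chain names and service string are made up for the example:

package main

import (
	"fmt"
	"strings"
)

const threshold = 1000

func appendServiceComment(args []string, svcName string, chains int) []string {
	if chains > threshold {
		return args // large ruleset: drop comments to shrink the restore input
	}
	return append(args, "-m", "comment", "--comment", svcName)
}

func rule(chains int) string {
	args := []string{"-A", "KUBE-SVC-XXXXXXXXXXXXXXXX"}
	args = appendServiceComment(args, `"ns1/svc1:http"`, chains)
	return strings.Join(append(args, "-j", "KUBE-SEP-YYYYYYYYYYYYYYYY"), " ")
}

func main() {
	fmt.Println(rule(10))   // ... -m comment --comment "ns1/svc1:http" -j ...
	fmt.Println(rule(5000)) // comment dropped: -A ... -j ...
}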
// This is where all of the iptables-save/restore calls happen.
// The only other iptables rules are those that are set up in iptablesInit()
// This assumes proxier.mu is NOT held
@ -616,11 +638,11 @@ func (proxier *Proxier) syncProxyRules() {
start := time.Now()
defer func() {
metrics.SyncProxyRulesLatency.Observe(metrics.SinceInMicroseconds(start))
glog.V(4).Infof("syncProxyRules took %v", time.Since(start))
klog.V(4).Infof("syncProxyRules took %v", time.Since(start))
}()
// don't sync rules till we've received services and endpoints
if !proxier.endpointsSynced || !proxier.servicesSynced {
glog.V(2).Info("Not syncing iptables until Services and Endpoints have been received from master")
klog.V(2).Info("Not syncing iptables until Services and Endpoints have been received from master")
return
}
@ -633,18 +655,18 @@ func (proxier *Proxier) syncProxyRules() {
staleServices := serviceUpdateResult.UDPStaleClusterIP
// merge stale services gathered from updateEndpointsMap
for _, svcPortName := range endpointUpdateResult.StaleServiceNames {
if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.GetProtocol() == api.ProtocolUDP {
glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.ClusterIPString())
if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.GetProtocol() == v1.ProtocolUDP {
klog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.ClusterIPString())
staleServices.Insert(svcInfo.ClusterIPString())
}
}
glog.V(3).Infof("Syncing iptables rules")
klog.V(3).Info("Syncing iptables rules")
// Create and link the kube chains.
for _, chain := range iptablesJumpChains {
if _, err := proxier.iptables.EnsureChain(chain.table, chain.chain); err != nil {
glog.Errorf("Failed to ensure that %s chain %s exists: %v", chain.table, kubeServicesChain, err)
klog.Errorf("Failed to ensure that %s chain %s exists: %v", chain.table, kubeServicesChain, err)
return
}
args := append(chain.extraArgs,
@ -652,7 +674,7 @@ func (proxier *Proxier) syncProxyRules() {
"-j", string(chain.chain),
)
if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, chain.table, chain.sourceChain, args...); err != nil {
glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", chain.table, chain.sourceChain, chain.chain, err)
klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", chain.table, chain.sourceChain, chain.chain, err)
return
}
}
@ -663,20 +685,21 @@ func (proxier *Proxier) syncProxyRules() {
// Get iptables-save output so we can check for existing chains and rules.
// This will be a map of chain name to chain with rules as stored in iptables-save/iptables-restore
existingFilterChains := make(map[utiliptables.Chain]string)
proxier.iptablesData.Reset()
err := proxier.iptables.SaveInto(utiliptables.TableFilter, proxier.iptablesData)
existingFilterChains := make(map[utiliptables.Chain][]byte)
proxier.existingFilterChainsData.Reset()
err := proxier.iptables.SaveInto(utiliptables.TableFilter, proxier.existingFilterChainsData)
if err != nil { // if we failed to get any rules
glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err)
klog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err)
} else { // otherwise parse the output
existingFilterChains = utiliptables.GetChainLines(utiliptables.TableFilter, proxier.iptablesData.Bytes())
existingFilterChains = utiliptables.GetChainLines(utiliptables.TableFilter, proxier.existingFilterChainsData.Bytes())
}
existingNATChains := make(map[utiliptables.Chain]string)
// IMPORTANT: existingNATChains may share memory with proxier.iptablesData.
existingNATChains := make(map[utiliptables.Chain][]byte)
proxier.iptablesData.Reset()
err = proxier.iptables.SaveInto(utiliptables.TableNAT, proxier.iptablesData)
if err != nil { // if we failed to get any rules
glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err)
klog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err)
} else { // otherwise parse the output
existingNATChains = utiliptables.GetChainLines(utiliptables.TableNAT, proxier.iptablesData.Bytes())
}
@ -696,14 +719,14 @@ func (proxier *Proxier) syncProxyRules() {
// (which most should have because we created them above).
for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeExternalServicesChain, kubeForwardChain} {
if chain, ok := existingFilterChains[chainName]; ok {
writeLine(proxier.filterChains, chain)
writeBytesLine(proxier.filterChains, chain)
} else {
writeLine(proxier.filterChains, utiliptables.MakeChainLine(chainName))
}
}
for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain, KubeMarkMasqChain} {
if chain, ok := existingNATChains[chainName]; ok {
writeLine(proxier.natChains, chain)
writeBytesLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(chainName))
}
@ -747,11 +770,17 @@ func (proxier *Proxier) syncProxyRules() {
// is just for efficiency, not correctness.
args := make([]string, 64)
// Compute total number of endpoint chains across all services.
proxier.endpointChainsNumber = 0
for svcName := range proxier.serviceMap {
proxier.endpointChainsNumber += len(proxier.endpointsMap[svcName])
}
// Build rules for each service.
for svcName, svc := range proxier.serviceMap {
svcInfo, ok := svc.(*serviceInfo)
if !ok {
glog.Errorf("Failed to cast serviceInfo %q", svcName.String())
klog.Errorf("Failed to cast serviceInfo %q", svcName.String())
continue
}
isIPv6 := utilnet.IsIPv6(svcInfo.ClusterIP)
@ -763,7 +792,7 @@ func (proxier *Proxier) syncProxyRules() {
if hasEndpoints {
// Create the per-service chain, retaining counters if possible.
if chain, ok := existingNATChains[svcChain]; ok {
writeLine(proxier.natChains, chain)
writeBytesLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(svcChain))
}
@ -775,7 +804,7 @@ func (proxier *Proxier) syncProxyRules() {
// Only for services that request OnlyLocal traffic,
// create the per-service LB chain, retaining counters if possible.
if lbChain, ok := existingNATChains[svcXlbChain]; ok {
writeLine(proxier.natChains, lbChain)
writeBytesLine(proxier.natChains, lbChain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(svcXlbChain))
}
@ -819,8 +848,8 @@ func (proxier *Proxier) syncProxyRules() {
// machine, hold the local port open so no other process can open it
// (because the socket might open but it would never work).
if local, err := utilproxy.IsLocalIP(externalIP); err != nil {
glog.Errorf("can't determine if IP is local, assuming not: %v", err)
} else if local {
klog.Errorf("can't determine if IP is local, assuming not: %v", err)
} else if local && (svcInfo.GetProtocol() != v1.ProtocolSCTP) {
lp := utilproxy.LocalPort{
Description: "externalIP for " + svcNameString,
IP: externalIP,
@ -828,7 +857,7 @@ func (proxier *Proxier) syncProxyRules() {
Protocol: protocol,
}
if proxier.portsMap[lp] != nil {
glog.V(4).Infof("Port %s was open before and is still needed", lp.String())
klog.V(4).Infof("Port %s was open before and is still needed", lp.String())
replacementPortsMap[lp] = proxier.portsMap[lp]
} else {
socket, err := proxier.portMapper.OpenLocalPort(&lp)
@ -841,8 +870,8 @@ func (proxier *Proxier) syncProxyRules() {
Name: proxier.hostname,
UID: types.UID(proxier.hostname),
Namespace: "",
}, api.EventTypeWarning, err.Error(), msg)
glog.Error(msg)
}, v1.EventTypeWarning, err.Error(), msg)
klog.Error(msg)
continue
}
replacementPortsMap[lp] = socket
@ -891,7 +920,7 @@ func (proxier *Proxier) syncProxyRules() {
if ingress.IP != "" {
// create service firewall chain
if chain, ok := existingNATChains[fwChain]; ok {
writeLine(proxier.natChains, chain)
writeBytesLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(fwChain))
}
@ -962,7 +991,7 @@ func (proxier *Proxier) syncProxyRules() {
// (because the socket might open but it would never work).
addresses, err := utilproxy.GetNodeAddresses(proxier.nodePortAddresses, proxier.networkInterfacer)
if err != nil {
glog.Errorf("Failed to get node ip address matching nodeport cidr: %v", err)
klog.Errorf("Failed to get node ip address matching nodeport cidr: %v", err)
continue
}
@ -987,12 +1016,12 @@ func (proxier *Proxier) syncProxyRules() {
// For ports on node IPs, open the actual port and hold it.
for _, lp := range lps {
if proxier.portsMap[lp] != nil {
glog.V(4).Infof("Port %s was open before and is still needed", lp.String())
klog.V(4).Infof("Port %s was open before and is still needed", lp.String())
replacementPortsMap[lp] = proxier.portsMap[lp]
} else {
} else if svcInfo.GetProtocol() != v1.ProtocolSCTP {
socket, err := proxier.portMapper.OpenLocalPort(&lp)
if err != nil {
glog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err)
klog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err)
continue
}
if lp.Protocol == "udp" {
@ -1002,7 +1031,7 @@ func (proxier *Proxier) syncProxyRules() {
// See issue: https://github.com/kubernetes/kubernetes/issues/49881
err := conntrack.ClearEntriesForPort(proxier.exec, lp.Port, isIPv6, v1.ProtocolUDP)
if err != nil {
glog.Errorf("Failed to clear udp conntrack for port %d, error: %v", lp.Port, err)
klog.Errorf("Failed to clear udp conntrack for port %d, error: %v", lp.Port, err)
}
}
replacementPortsMap[lp] = socket
@ -1058,7 +1087,7 @@ func (proxier *Proxier) syncProxyRules() {
for _, ep := range proxier.endpointsMap[svcName] {
epInfo, ok := ep.(*endpointsInfo)
if !ok {
glog.Errorf("Failed to cast endpointsInfo %q", ep.String())
klog.Errorf("Failed to cast endpointsInfo %q", ep.String())
continue
}
endpoints = append(endpoints, epInfo)
@ -1067,7 +1096,7 @@ func (proxier *Proxier) syncProxyRules() {
// Create the endpoint chain, retaining counters if possible.
if chain, ok := existingNATChains[utiliptables.Chain(endpointChain)]; ok {
writeLine(proxier.natChains, chain)
writeBytesLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(endpointChain))
}
@ -1075,14 +1104,18 @@ func (proxier *Proxier) syncProxyRules() {
}
// First write session affinity rules, if applicable.
if svcInfo.SessionAffinityType == api.ServiceAffinityClientIP {
if svcInfo.SessionAffinityType == v1.ServiceAffinityClientIP {
for _, endpointChain := range endpointChains {
writeLine(proxier.natRules,
args = append(args[:0],
"-A", string(svcChain),
"-m", "comment", "--comment", svcNameString,
)
args = proxier.appendServiceCommentLocked(args, svcNameString)
args = append(args,
"-m", "recent", "--name", string(endpointChain),
"--rcheck", "--seconds", strconv.Itoa(svcInfo.StickyMaxAgeSeconds), "--reap",
"-j", string(endpointChain))
"-j", string(endpointChain),
)
writeLine(proxier.natRules, args...)
}
}
@ -1095,10 +1128,8 @@ func (proxier *Proxier) syncProxyRules() {
continue
}
// Balancing rules in the per-service chain.
args = append(args[:0], []string{
"-A", string(svcChain),
"-m", "comment", "--comment", svcNameString,
}...)
args = append(args[:0], "-A", string(svcChain))
args = proxier.appendServiceCommentLocked(args, svcNameString)
if i < (n - 1) {
// Each rule is a probabilistic match.
args = append(args,
@ -1111,16 +1142,14 @@ func (proxier *Proxier) syncProxyRules() {
writeLine(proxier.natRules, args...)
// Rules in the per-endpoint chain.
args = append(args[:0],
"-A", string(endpointChain),
"-m", "comment", "--comment", svcNameString,
)
args = append(args[:0], "-A", string(endpointChain))
args = proxier.appendServiceCommentLocked(args, svcNameString)
// Handle traffic that loops back to the originator with SNAT.
writeLine(proxier.natRules, append(args,
"-s", utilproxy.ToCIDR(net.ParseIP(epIP)),
"-j", string(KubeMarkMasqChain))...)
// Update client-affinity lists.
if svcInfo.SessionAffinityType == api.ServiceAffinityClientIP {
if svcInfo.SessionAffinityType == v1.ServiceAffinityClientIP {
args = append(args, "-m", "recent", "--name", string(endpointChain), "--set")
}
// DNAT to final destination.
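
The probabilistic match noted above is how iptables spreads traffic evenly: with n endpoint chains, chain i (0-indexed) is given --probability 1/(n-i), and the last chain is an unconditional jump, so every endpoint receives an overall 1/n share. A quick worked check of that arithmetic:

package main

import "fmt"

func main() {
	n := 3
	remaining := 1.0 // probability mass not matched by earlier rules
	for i := 0; i < n; i++ {
		p := 1.0 // the final rule matches unconditionally
		if i < n-1 {
			p = 1.0 / float64(n-i) // per-rule probability, as in the proxier
		}
		fmt.Printf("chain %d: rule prob %.5f, overall prob %.5f\n", i, p, remaining*p)
		remaining *= 1 - p
	}
	// Every chain's overall probability comes out to 0.33333 = 1/n.
}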
@ -1171,7 +1200,7 @@ func (proxier *Proxier) syncProxyRules() {
writeLine(proxier.natRules, args...)
} else {
// First write session affinity rules only over local endpoints, if applicable.
if svcInfo.SessionAffinityType == api.ServiceAffinityClientIP {
if svcInfo.SessionAffinityType == v1.ServiceAffinityClientIP {
for _, endpointChain := range localEndpointChains {
writeLine(proxier.natRules,
"-A", string(svcXlbChain),
@ -1215,7 +1244,7 @@ func (proxier *Proxier) syncProxyRules() {
// We must (as per iptables) write a chain-line for it, which has
// the nice effect of flushing the chain. Then we can remove the
// chain.
writeLine(proxier.natChains, existingNATChains[chain])
writeBytesLine(proxier.natChains, existingNATChains[chain])
writeLine(proxier.natRules, "-X", chainString)
}
}
@ -1224,7 +1253,7 @@ func (proxier *Proxier) syncProxyRules() {
// other service portal rules.
addresses, err := utilproxy.GetNodeAddresses(proxier.nodePortAddresses, proxier.networkInterfacer)
if err != nil {
glog.Errorf("Failed to get node ip address matching nodeport cidr")
klog.Errorf("Failed to get node ip address matching nodeport cidr")
} else {
isIPv6 := proxier.iptables.IsIpv6()
for address := range addresses {
@ -1241,7 +1270,7 @@ func (proxier *Proxier) syncProxyRules() {
}
// Ignore IP addresses with incorrect version
if isIPv6 && !utilnet.IsIPv6String(address) || !isIPv6 && utilnet.IsIPv6String(address) {
glog.Errorf("IP address %s has incorrect IP version", address)
klog.Errorf("IP address %s has incorrect IP version", address)
continue
}
// create nodeport rules for each IP one by one
@ -1300,12 +1329,12 @@ func (proxier *Proxier) syncProxyRules() {
proxier.iptablesData.Write(proxier.natChains.Bytes())
proxier.iptablesData.Write(proxier.natRules.Bytes())
glog.V(5).Infof("Restoring iptables rules: %s", proxier.iptablesData.Bytes())
klog.V(5).Infof("Restoring iptables rules: %s", proxier.iptablesData.Bytes())
err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters)
if err != nil {
glog.Errorf("Failed to execute iptables-restore: %v", err)
klog.Errorf("Failed to execute iptables-restore: %v", err)
// Revert new local ports.
glog.V(2).Infof("Closing local ports after iptables-restore failure")
klog.V(2).Infof("Closing local ports after iptables-restore failure")
utilproxy.RevertPorts(replacementPortsMap, proxier.portsMap)
return
}
@ -1327,17 +1356,17 @@ func (proxier *Proxier) syncProxyRules() {
// not "OnlyLocal", but the services list will not, and the healthChecker
// will just drop those endpoints.
if err := proxier.healthChecker.SyncServices(serviceUpdateResult.HCServiceNodePorts); err != nil {
glog.Errorf("Error syncing healtcheck services: %v", err)
klog.Errorf("Error syncing healthcheck services: %v", err)
}
if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.HCEndpointsLocalIPSize); err != nil {
glog.Errorf("Error syncing healthcheck endoints: %v", err)
klog.Errorf("Error syncing healthcheck endpoints: %v", err)
}
// Finish housekeeping.
// TODO: these could be made more consistent.
for _, svcIP := range staleServices.UnsortedList() {
if err := conntrack.ClearEntriesForIP(proxier.exec, svcIP, v1.ProtocolUDP); err != nil {
glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err)
klog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err)
}
}
proxier.deleteEndpointConnections(endpointUpdateResult.StaleEndpoints)
@ -1356,6 +1385,11 @@ func writeLine(buf *bytes.Buffer, words ...string) {
}
}
func writeBytesLine(buf *bytes.Buffer, bytes []byte) {
buf.Write(bytes)
buf.WriteByte('\n')
}
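
writeBytesLine exists alongside writeLine for a small but measurable reason: the chain lines recovered from iptables-save (via GetChainLines) are already []byte, so writing them directly skips a []byte-to-string-and-back round trip per preserved chain. A sketch of the two helpers side by side (writeLine simplified; the real one special-cases a single word):

package main

import (
	"bytes"
	"fmt"
	"strings"
)

func writeLine(buf *bytes.Buffer, words ...string) {
	buf.WriteString(strings.Join(words, " ") + "\n")
}

func writeBytesLine(buf *bytes.Buffer, line []byte) {
	buf.Write(line) // no string conversion, no extra allocation
	buf.WriteByte('\n')
}

func main() {
	var buf bytes.Buffer
	saved := []byte(":KUBE-SERVICES - [0:0]") // shaped like an iptables-save chain line
	writeBytesLine(&buf, saved)
	writeLine(&buf, "-A", "KUBE-SERVICES", "-j", "ACCEPT")
	fmt.Print(buf.String())
}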
func openLocalPort(lp *utilproxy.LocalPort) (utilproxy.Closeable, error) {
// For ports on node IPs, open the actual port and hold it, even though we
// use iptables to redirect traffic.
@ -1390,6 +1424,6 @@ func openLocalPort(lp *utilproxy.LocalPort) (utilproxy.Closeable, error) {
default:
return nil, fmt.Errorf("unknown protocol %q", lp.Protocol)
}
glog.V(2).Infof("Opened local port %s", lp.String())
klog.V(2).Infof("Opened local port %s", lp.String())
return socket, nil
}
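
As the comment in openLocalPort notes, the socket is bound purely to reserve the port: iptables redirects the actual traffic, and the held listener just prevents another process from claiming the nodePort. A minimal illustration of that open-and-hold idea, binding an ephemeral loopback port here instead of a real nodePort:

package main

import (
	"fmt"
	"net"
)

func main() {
	// A real proxier would bind the actual nodePort on the node's IPs.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		fmt.Println("port already taken:", err)
		return
	}
	defer ln.Close() // released only when the port is no longer needed
	// Never Accept(): the socket only holds the port; iptables DNATs the traffic.
	fmt.Println("holding", ln.Addr())
}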

File diff suppressed because it is too large

View File

@ -14,7 +14,6 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/ipvs/testing:go_default_library",
"//pkg/proxy/util:go_default_library",
@ -25,10 +24,11 @@ go_test(
"//pkg/util/iptables/testing:go_default_library",
"//pkg/util/ipvs:go_default_library",
"//pkg/util/ipvs/testing:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],
@ -37,48 +37,15 @@ go_test(
go_library(
name = "go_default_library",
srcs = [
"graceful_termination.go",
"ipset.go",
"netlink.go",
"netlink_linux.go",
"netlink_unsupported.go",
"proxier.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"netlink_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"netlink_unsupported.go",
],
"//conditions:default": [],
}),
],
importpath = "k8s.io/kubernetes/pkg/proxy/ipvs",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/healthcheck:go_default_library",
"//pkg/proxy/metrics:go_default_library",
@ -90,13 +57,13 @@ go_library(
"//pkg/util/ipvs:go_default_library",
"//pkg/util/net:go_default_library",
"//pkg/util/sysctl:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:linux": [

View File

@ -2,6 +2,7 @@ reviewers:
- thockin
- brendandburns
- m1093782566
- Lion-Wei
approvers:
- thockin
- brendandburns

View File

@ -27,180 +27,239 @@ IPVS runs on a host and acts as a load balancer in front of a cluster of real se
and UDP-based services to the real servers, and make services of real servers appear as virtual services on a single IP address.
## IPVS vs. IPTABLES
IPVS mode was introduced in Kubernetes v1.8 and goes beta in v1.9. IPTABLES mode was added in v1.1 and become the default operating mode since v1.2. Both IPVS and IPTABLES are based on `netfilter`.
IPVS mode was introduced in Kubernetes v1.8, went beta in v1.9, and GA in v1.11. IPTABLES mode was added in v1.1 and has been the default operating mode since v1.2. Both IPVS and IPTABLES are based on `netfilter`.
Differences between IPVS mode and IPTABLES mode are as follows:
1. IPVS provides better scalability and performance for large clusters.
2. IPVS supports more sophisticated load balancing algorithms than iptables (least load, least connections, locality, weighted, etc.).
3. IPVS supports server health checking and connection retries, etc.
### When ipvs falls back to iptables
IPVS proxier will employ iptables in doing packet filtering, SNAT and supporting NodePort type service. Specifically, ipvs proxier will fall back on iptables in the following 4 scenarios.
IPVS proxier employs iptables for packet filtering, SNAT, and masquerading.
Specifically, ipvs proxier uses ipset to store the source or destination addresses of traffic that must be dropped or masqueraded, which keeps the number of iptables rules constant no matter how many services there are.
Here is the table of ipset sets that the ipvs proxier uses.
| set name | members | usage |
| :----------------------------- | ---------------------------------------- | ---------------------------------------- |
| KUBE-CLUSTER-IP | All service IP + port | Mark-Masq for cases that `masquerade-all=true` or `clusterCIDR` specified |
| KUBE-LOOP-BACK | All service IP + port + IP | masquerade for solving hairpin purpose |
| KUBE-EXTERNAL-IP | service external IP + port | masquerade for packets to external IPs |
| KUBE-LOAD-BALANCER | load balancer ingress IP + port | masquerade for packets to load balancer type service |
| KUBE-LOAD-BALANCER-LOCAL | LB ingress IP + port with `externalTrafficPolicy=local` | accept packets to load balancer with `externalTrafficPolicy=local` |
| KUBE-LOAD-BALANCER-FW | load balancer ingress IP + port with `loadBalancerSourceRanges` | packet filter for load balancer with `loadBalancerSourceRanges` specified |
| KUBE-LOAD-BALANCER-SOURCE-CIDR | load balancer ingress IP + port + source CIDR | packet filter for load balancer with `loadBalancerSourceRanges` specified |
| KUBE-NODE-PORT-TCP | nodeport type service TCP port | masquerade for packets to nodePort(TCP) |
| KUBE-NODE-PORT-LOCAL-TCP | nodeport type service TCP port with `externalTrafficPolicy=local` | accept packets to nodeport service with `externalTrafficPolicy=local` |
| KUBE-NODE-PORT-UDP | nodeport type service UDP port | masquerade for packets to nodePort(UDP) |
| KUBE-NODE-PORT-LOCAL-UDP | nodeport type service UDP port with `externalTrafficPolicy=local` | accept packets to nodeport service with `externalTrafficPolicy=local` |
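These sets are maintained through the `pkg/util/ipset` wrappers that appear later in this diff (`CreateSet`, `AddEntry`, `DelEntry`, `ListEntries`). As a rough illustration of those calls, here is a minimal, hypothetical Go sketch; the set name and the entry value are made up for the example, and the entry format follows the `"<ip>,<protocol>:<port>"` shape used by the `hash:ip,port` test entries later in this diff:

```go
package main

import (
	"fmt"

	utilipset "k8s.io/kubernetes/pkg/util/ipset"
	utilexec "k8s.io/utils/exec"
)

func main() {
	// Build an ipset runner around the exec interface.
	handle := utilipset.New(utilexec.New())
	set := &utilipset.IPSet{
		Name:    "KUBE-CLUSTER-IP", // illustrative name only
		SetType: utilipset.HashIPPort,
	}
	// The trailing "true" ignores "already exists" errors, mirroring the
	// CreateSet(&set.IPSet, true) call later in this diff.
	if err := handle.CreateSet(set, true); err != nil {
		fmt.Printf("failed to create set: %v\n", err)
		return
	}
	// Add one illustrative service IP + port entry.
	if err := handle.AddEntry("10.244.5.1,tcp:8080", set, true); err != nil {
		fmt.Printf("failed to add entry: %v\n", err)
		return
	}
	entries, err := handle.ListEntries(set.Name)
	if err != nil {
		fmt.Printf("failed to list entries: %v\n", err)
		return
	}
	fmt.Println(entries)
}
```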
IPVS proxier will fall back on iptables in the following scenarios.
**1. kube-proxy starts with --masquerade-all=true**
If kube-proxy starts with `--masquerade-all=true`, ipvs proxier will masquerade all traffic accessing service Cluster IP, which behaves the same as what iptables proxier. Suppose there is a service with Cluster IP `10.244.5.1` and port `8080`, then the iptables installed by ipvs proxier should be like what is shown below.
If kube-proxy starts with `--masquerade-all=true`, ipvs proxier will masquerade all traffic accessing service Cluster IPs, which behaves the same as the iptables proxier. Suppose kube-proxy has the flag `--masquerade-all=true` specified; then the iptables rules installed by the ipvs proxier should look like what is shown below.
```shell
# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
Chain KUBE-MARK-DROP (0 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x8000
Chain KUBE-MARK-MASQ (6 references)
target prot opt source destination
Chain KUBE-MARK-MASQ (2 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOOP-BACK dst,dst,src
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-MARK-MASQ tcp -- 0.0.0.0/0 10.244.5.1 /* default/foo:http cluster IP */ tcp dpt:8080
target prot opt source destination
KUBE-MARK-MASQ all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-CLUSTER-IP dst,dst
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-CLUSTER-IP dst,dst
```
**2. Specify cluster CIDR in kube-proxy startup**
If kube-proxy starts with `--cluster-cidr=<cidr>`, ipvs proxier will masquerade off-cluster traffic accessing service Cluster IP, which behaves the same as what iptables proxier. Suppose kube-proxy is provided with the cluster cidr `10.244.16.0/24`, and service Cluster IP is `10.244.5.1` and port is `8080`, then the iptables installed by ipvs proxier should be like what is shown below.
If kube-proxy starts with `--cluster-cidr=<cidr>`, ipvs proxier will masquerade off-cluster traffic accessing service Cluster IPs, which behaves the same as the iptables proxier. Suppose kube-proxy is provided with the cluster cidr `10.244.16.0/24`; then the iptables rules installed by the ipvs proxier should look like what is shown below.
```shell
# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
Chain KUBE-MARK-DROP (0 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x8000
Chain KUBE-MARK-MASQ (6 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-MARK-MASQ tcp -- !10.244.16.0/24 10.244.5.1 /* default/foo:http cluster IP */ tcp dpt:8080
```
**3. Load Balancer Source Ranges is specified for LB type service**
When service's `LoadBalancerStatus.ingress.IP` is not empty and service's `LoadBalancerSourceRanges` is specified, ipvs proxier will install iptables which looks like what is shown below.
Suppose service's `LoadBalancerStatus.ingress.IP` is `10.96.1.2` and service's `LoadBalancerSourceRanges` is `10.120.2.0/24`.
```shell
# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
Chain KUBE-MARK-DROP (0 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x8000
Chain KUBE-MARK-MASQ (6 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-SERVICES (2 references)
target prot opt source destination
ACCEPT tcp -- 10.120.2.0/24 10.96.1.2 /* default/foo:http loadbalancer IP */ tcp dpt:8080
DROP tcp -- 0.0.0.0/0 10.96.1.2 /* default/foo:http loadbalancer IP */ tcp dpt:8080
```
**4. Support NodePort type service**
For supporting NodePort type service, ipvs will recruit the existing implementation in iptables proxier. For example,
```shell
# kubectl describe svc nginx-service
Name: nginx-service
...
Type: NodePort
IP: 10.101.28.148
Port: http 3080/TCP
NodePort: http 31604/TCP
Endpoints: 172.17.0.2:80
Session Affinity: None
# iptables -t nat -nL
[root@100-106-179-225 ~]# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-MARK-MASQ tcp -- !172.16.0.0/16 10.101.28.148 /* default/nginx-service:http cluster IP */ tcp dpt:3080
KUBE-SVC-6IM33IEVEEV7U3GP tcp -- 0.0.0.0/0 10.101.28.148 /* default/nginx-service:http cluster IP */ tcp dpt:3080
KUBE-NODEPORTS all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL
Chain KUBE-NODEPORTS (1 references)
target prot opt source destination
KUBE-MARK-MASQ tcp -- 0.0.0.0/0 0.0.0.0/0 /* default/nginx-service:http */ tcp dpt:31604
KUBE-SVC-6IM33IEVEEV7U3GP tcp -- 0.0.0.0/0 0.0.0.0/0 /* default/nginx-service:http */ tcp dpt:31604
Chain KUBE-SVC-6IM33IEVEEV7U3GP (2 references)
target prot opt source destination
KUBE-SEP-Q3UCPZ54E6Q2R4UT all -- 0.0.0.0/0 0.0.0.0/0 /* default/nginx-service:http */
Chain KUBE-SEP-Q3UCPZ54E6Q2R4UT (1 references)
target prot opt source destination
KUBE-MARK-MASQ all -- 172.17.0.2 0.0.0.0/0 /* default/nginx-service:http */
DNAT tcp -- 0.0.0.0/0 0.0.0.0/0 /* default/nginx-service:http */ tcp to:172.17.0.2:80
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-MARK-MASQ (3 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOOP-BACK dst,dst,src
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-MARK-MASQ all -- !10.244.16.0/24 0.0.0.0/0 match-set KUBE-CLUSTER-IP dst,dst
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-CLUSTER-IP dst,dst
```
**3. Load Balancer type service**
For LoadBalancer type services, ipvs proxier will install iptables rules that match the ipset `KUBE-LOAD-BALANCER`.
When a service's `LoadBalancerSourceRanges` is specified or `externalTrafficPolicy=local` is set,
ipvs proxier will create the ipset sets `KUBE-LOAD-BALANCER-LOCAL`/`KUBE-LOAD-BALANCER-FW`/`KUBE-LOAD-BALANCER-SOURCE-CIDR`
and install iptables rules accordingly, which should look like what is shown below.
```shell
# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-FIREWALL (1 references)
target prot opt source destination
RETURN all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOAD-BALANCER-SOURCE-CIDR dst,dst,src
KUBE-MARK-DROP all -- 0.0.0.0/0 0.0.0.0/0
Chain KUBE-LOAD-BALANCER (1 references)
target prot opt source destination
KUBE-FIREWALL all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOAD-BALANCER-FW dst,dst
RETURN all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOAD-BALANCER-LOCAL dst,dst
KUBE-MARK-MASQ all -- 0.0.0.0/0 0.0.0.0/0
Chain KUBE-MARK-DROP (1 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x8000
Chain KUBE-MARK-MASQ (2 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOOP-BACK dst,dst,src
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-LOAD-BALANCER all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOAD-BALANCER dst,dst
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOAD-BALANCER dst,dst
```
**4. NodePort type service**
For NodePort type services, ipvs proxier will install iptables rules that match the ipsets `KUBE-NODE-PORT-TCP`/`KUBE-NODE-PORT-UDP`.
When `externalTrafficPolicy=local` is specified, ipvs proxier will create the ipset sets `KUBE-NODE-PORT-LOCAL-TCP`/`KUBE-NODE-PORT-LOCAL-UDP`
and install iptables rules accordingly, which should look like what is shown below.
Suppose a service with a TCP nodePort.
```shell
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-MARK-MASQ (2 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-NODE-PORT (1 references)
target prot opt source destination
RETURN all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-NODE-PORT-LOCAL-TCP dst
KUBE-MARK-MASQ all -- 0.0.0.0/0 0.0.0.0/0
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOOP-BACK dst,dst,src
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-NODE-PORT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-NODE-PORT-TCP dst
```
**5. Service with externalIPs specified**
For services with `externalIPs` specified, ipvs proxier will install iptables rules that match the ipset `KUBE-EXTERNAL-IP`.
Suppose we have a service with `externalIPs` specified; the iptables rules should look like what is shown below.
```shell
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-MARK-MASQ (2 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOOP-BACK dst,dst,src
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-MARK-MASQ all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-EXTERNAL-IP dst,dst
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-EXTERNAL-IP dst,dst PHYSDEV match ! --physdev-is-in ADDRTYPE match src-type !LOCAL
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-EXTERNAL-IP dst,dst ADDRTYPE match dst-type LOCAL
```
## Run kube-proxy in ipvs mode
Currently, local-up scripts, GCE scripts and kubeadm support switching IPVS proxy mode via exporting environment variables or specifying flags.
### Prerequisite
Ensure IPVS required kernel modules
Ensure the required IPVS kernel modules are loaded (**Notes**: use `nf_conntrack` instead of `nf_conntrack_ipv4` for Linux kernel 4.19 and later)
```shell
ip_vs
ip_vs_rr
@ -248,13 +307,13 @@ lsmod | grep -e ipvs -e nf_conntrack_ipv4
cut -f1 -d " " /proc/modules | grep -e ip_vs -e nf_conntrack_ipv4
```
Packages such as `ipset` should also be installed on the node before using IPVS mode.
Kube-proxy will fall back to IPTABLES mode if those requirements are not met.
### Local UP Cluster
Kube-proxy will run in iptables mode by default in a [local-up cluster](https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md).
To use IPVS mode, users should export the env `KUBE_PROXY_MODE=ipvs` to specify the ipvs mode before [starting the cluster](https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md#starting-the-cluster):
```shell
@ -266,7 +325,7 @@ export KUBE_PROXY_MODE=ipvs
Similar to local-up cluster, kube-proxy in [clusters running on GCE](https://kubernetes.io/docs/getting-started-guides/gce/) run in iptables mode by default. Users need to export the env `KUBE_PROXY_MODE=ipvs` before [starting a cluster](https://kubernetes.io/docs/getting-started-guides/gce/#starting-a-cluster):
```shell
#before running one of the commmands chosen to start a cluster:
#before running one of the commands chosen to start a cluster:
# curl -sS https://get.k8s.io | bash
# wget -q -O - https://get.k8s.io | bash
# cluster/kube-up.sh
@ -275,20 +334,19 @@ export KUBE_PROXY_MODE=ipvs
### Cluster Created by Kubeadm
Kube-proxy will run in iptables mode by default in a cluster deployed by [kubeadm](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/).
If you are using kubeadm with a [configuration file](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file), you can specify the ipvs mode adding `SupportIPVSProxyMode: true` below the `kubeProxy` field.
If you are using kubeadm with a [configuration file](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file), you have to add `mode: ipvs` and also add `SupportIPVSProxyMode: true` below the `kubeProxy` field as part of the kubeadm configuration.
```yaml
kind: MasterConfiguration
apiVersion: kubeadm.k8s.io/v1alpha1
...
kubeProxy:
config:
featureGates: SupportIPVSProxyMode=true
featureGates:
SupportIPVSProxyMode: true
mode: ipvs
...
```
Note that in Kubernetes 1.11 and later, `SupportIPVSProxyMode` is set to `true` by default.
before running
`kubeadm init --config <path_to_configuration_file>`
@ -301,7 +359,7 @@ kubeadm init --feature-gates=SupportIPVSProxyMode=true
to specify the ipvs mode before deploying the cluster.
**Notes**
If ipvs mode is successfully enabled, you should see ipvs proxy rules (use `ipvsadm`) like
```shell
# ipvsadm -ln
@ -316,7 +374,7 @@ or similar logs occur in kube-proxy logs (for example, `/tmp/kube-proxy.log` for
Using ipvs Proxier.
```
If there are no ipvs proxy rules, or the following logs occur, it indicates that kube-proxy failed to use ipvs mode:
```
Can't use ipvs proxier, trying iptables proxier
Using iptables Proxier.
@ -352,7 +410,7 @@ UDP 10.0.0.10:53 rr
### Why kube-proxy can't start IPVS mode
Use the following checklist to help you solve the problems:
**1. Enable the IPVS feature gate**

View File

@ -0,0 +1,220 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipvs
import (
"sync"
"time"
"fmt"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog"
utilipvs "k8s.io/kubernetes/pkg/util/ipvs"
)
const (
rsGracefulDeletePeriod = 15 * time.Minute
rsCheckDeleteInterval = 1 * time.Minute
)
// listItem stores real server information and the process time.
// If nothing special happens, the real server will be deleted after the process time.
type listItem struct {
VirtualServer *utilipvs.VirtualServer
RealServer *utilipvs.RealServer
}
// String returns the unique real server name (with virtual server information)
func (g *listItem) String() string {
return GetUniqueRSName(g.VirtualServer, g.RealServer)
}
// GetUniqueRSName returns the unique rs name string, including the vs information
func GetUniqueRSName(vs *utilipvs.VirtualServer, rs *utilipvs.RealServer) string {
return vs.String() + "/" + rs.String()
}
type graceTerminateRSList struct {
lock sync.Mutex
list map[string]*listItem
}
// add pushes a new element onto the rsList
func (q *graceTerminateRSList) add(rs *listItem) bool {
q.lock.Lock()
defer q.lock.Unlock()
uniqueRS := rs.String()
if _, ok := q.list[uniqueRS]; ok {
return false
}
klog.V(5).Infof("Adding rs %v to graceful delete rsList", rs)
q.list[uniqueRS] = rs
return true
}
// remove removes an element from the rsList
func (q *graceTerminateRSList) remove(rs *listItem) bool {
q.lock.Lock()
defer q.lock.Unlock()
uniqueRS := rs.String()
if _, ok := q.list[uniqueRS]; ok {
// the item exists: delete it and report success
delete(q.list, uniqueRS)
return true
}
return false
}
func (q *graceTerminateRSList) flushList(handler func(rsToDelete *listItem) (bool, error)) bool {
success := true
for name, rs := range q.list {
deleted, err := handler(rs)
if err != nil {
klog.Errorf("Try delete rs %q err: %v", name, err)
success = false
}
if deleted {
klog.Infof("lw: remote out of the list: %s", name)
q.remove(rs)
}
}
return success
}
// exist checks whether the specified unique RS is in the rsList
func (q *graceTerminateRSList) exist(uniqueRS string) (*listItem, bool) {
q.lock.Lock()
defer q.lock.Unlock()
if rs, ok := q.list[uniqueRS]; ok {
return rs, true
}
return nil, false
}
// GracefulTerminationManager manages rs graceful termination information and does the graceful termination work.
// rsList is the list of rs to terminate gracefully; ipvs is the ipvs interface used for the ipvs delete/update work.
type GracefulTerminationManager struct {
rsList graceTerminateRSList
ipvs utilipvs.Interface
}
// NewGracefulTerminationManager creates a gracefulTerminationManager to manage ipvs rs graceful termination work
func NewGracefulTerminationManager(ipvs utilipvs.Interface) *GracefulTerminationManager {
l := make(map[string]*listItem)
return &GracefulTerminationManager{
rsList: graceTerminateRSList{
list: l,
},
ipvs: ipvs,
}
}
// InTerminationList checks whether the specified unique rs name is in the graceful termination list
func (m *GracefulTerminationManager) InTerminationList(uniqueRS string) bool {
_, exist := m.rsList.exist(uniqueRS)
return exist
}
// GracefulDeleteRS sets the rs weight to 0 and adds the rs to the graceful termination list
func (m *GracefulTerminationManager) GracefulDeleteRS(vs *utilipvs.VirtualServer, rs *utilipvs.RealServer) error {
// Try to delete the rs before adding it to the graceful delete list
ele := &listItem{
VirtualServer: vs,
RealServer: rs,
}
deleted, err := m.deleteRsFunc(ele)
if err != nil {
klog.Errorf("Delete rs %q err: %v", ele.String(), err)
}
if deleted {
return nil
}
rs.Weight = 0
err = m.ipvs.UpdateRealServer(vs, rs)
if err != nil {
return err
}
klog.V(5).Infof("Adding an element to graceful delete rsList: %+v", ele)
m.rsList.add(ele)
return nil
}
func (m *GracefulTerminationManager) deleteRsFunc(rsToDelete *listItem) (bool, error) {
klog.Infof("Trying to delete rs: %s", rsToDelete.String())
rss, err := m.ipvs.GetRealServers(rsToDelete.VirtualServer)
if err != nil {
return false, err
}
for _, rs := range rss {
if rsToDelete.RealServer.Equal(rs) {
if rs.ActiveConn != 0 {
return false, nil
}
klog.Infof("Deleting rs: %s", rsToDelete.String())
err := m.ipvs.DeleteRealServer(rsToDelete.VirtualServer, rs)
if err != nil {
return false, fmt.Errorf("Delete destination %q err: %v", rs.String(), err)
}
return true, nil
}
}
return true, fmt.Errorf("Failed to delete rs %q, can't find the real server", rsToDelete.String())
}
func (m *GracefulTerminationManager) tryDeleteRs() {
if !m.rsList.flushList(m.deleteRsFunc) {
klog.Errorf("Try flush graceful termination list err")
}
}
// MoveRSOutofGracefulDeleteList deletes an rs and removes it from the rsList immediately
func (m *GracefulTerminationManager) MoveRSOutofGracefulDeleteList(uniqueRS string) error {
rsToDelete, find := m.rsList.exist(uniqueRS)
if !find || rsToDelete == nil {
return fmt.Errorf("failed to find rs: %q", uniqueRS)
}
err := m.ipvs.DeleteRealServer(rsToDelete.VirtualServer, rsToDelete.RealServer)
if err != nil {
return err
}
m.rsList.remove(rsToDelete)
return nil
}
// Run starts a goroutine that tries to delete the rs entries in the graceful delete rsList at a 1 minute interval
func (m *GracefulTerminationManager) Run() {
// before starting, re-add any leftover rs to the graceful delete rsList
vss, err := m.ipvs.GetVirtualServers()
if err != nil {
klog.Errorf("IPVS graceful delete manager failed to get IPVS virtualserver")
}
for _, vs := range vss {
rss, err := m.ipvs.GetRealServers(vs)
if err != nil {
klog.Errorf("IPVS graceful delete manager failed to get %v realserver", vs)
continue
}
for _, rs := range rss {
m.GracefulDeleteRS(vs, rs)
}
}
go wait.Until(m.tryDeleteRs, rsCheckDeleteInterval, wait.NeverStop)
}
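For orientation, here is a minimal, hypothetical sketch (not part of this commit) of how a caller could drive the manager defined above; `ipvsIface`, `vs` and `rs` are assumed to come from the surrounding proxier:

```go
package ipvs

import (
	"k8s.io/klog"
	utilipvs "k8s.io/kubernetes/pkg/util/ipvs"
)

// exampleGracefulDelete shows the intended call pattern; it is illustrative only.
func exampleGracefulDelete(ipvsIface utilipvs.Interface, vs *utilipvs.VirtualServer, rs *utilipvs.RealServer) {
	m := NewGracefulTerminationManager(ipvsIface)
	// Run re-queues any leftover real servers, then retries deletes
	// every rsCheckDeleteInterval (1 minute).
	m.Run()

	// Instead of deleting rs outright, weight it to 0 and let the manager
	// remove it once its active connection count drops to zero.
	if err := m.GracefulDeleteRS(vs, rs); err != nil {
		klog.Errorf("Graceful delete of rs failed: %v", err)
	}

	// While rs is draining, it remains in the termination list.
	if m.InTerminationList(GetUniqueRSName(vs, rs)) {
		klog.V(5).Infof("rs %s is still draining", GetUniqueRSName(vs, rs))
	}
}
```

The point of the design is that `GracefulDeleteRS` only zeroes the weight, so no new connections are scheduled to the real server, while the periodic pass started by `Run` performs the actual delete once `ActiveConn` reaches zero.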

View File

@ -18,11 +18,11 @@ package ipvs
import (
"k8s.io/apimachinery/pkg/util/sets"
utilversion "k8s.io/apimachinery/pkg/util/version"
utilipset "k8s.io/kubernetes/pkg/util/ipset"
utilversion "k8s.io/kubernetes/pkg/util/version"
"fmt"
"github.com/golang/glog"
"k8s.io/klog"
)
const (
@ -64,6 +64,12 @@ const (
kubeNodePortLocalSetUDPComment = "Kubernetes nodeport UDP port with externalTrafficPolicy=local"
kubeNodePortLocalSetUDP = "KUBE-NODE-PORT-LOCAL-UDP"
kubeNodePortSetSCTPComment = "Kubernetes nodeport SCTP port for masquerade purpose"
kubeNodePortSetSCTP = "KUBE-NODE-PORT-SCTP"
kubeNodePortLocalSetSCTPComment = "Kubernetes nodeport SCTP port with externalTrafficPolicy=local"
kubeNodePortLocalSetSCTP = "KUBE-NODE-PORT-LOCAL-SCTP"
)
// IPSetVersioner can query the current ipset version.
@ -119,7 +125,7 @@ func (set *IPSet) resetEntries() {
func (set *IPSet) syncIPSetEntries() {
appliedEntries, err := set.handle.ListEntries(set.Name)
if err != nil {
glog.Errorf("Failed to list ip set entries, error: %v", err)
klog.Errorf("Failed to list ip set entries, error: %v", err)
return
}
@ -134,18 +140,18 @@ func (set *IPSet) syncIPSetEntries() {
for _, entry := range currentIPSetEntries.Difference(set.activeEntries).List() {
if err := set.handle.DelEntry(entry, set.Name); err != nil {
if !utilipset.IsNotFoundError(err) {
glog.Errorf("Failed to delete ip set entry: %s from ip set: %s, error: %v", entry, set.Name, err)
klog.Errorf("Failed to delete ip set entry: %s from ip set: %s, error: %v", entry, set.Name, err)
}
} else {
glog.V(3).Infof("Successfully delete legacy ip set entry: %s from ip set: %s", entry, set.Name)
klog.V(3).Infof("Successfully delete legacy ip set entry: %s from ip set: %s", entry, set.Name)
}
}
// Create active entries
for _, entry := range set.activeEntries.Difference(currentIPSetEntries).List() {
if err := set.handle.AddEntry(entry, &set.IPSet, true); err != nil {
glog.Errorf("Failed to add entry: %v to ip set: %s, error: %v", entry, set.Name, err)
klog.Errorf("Failed to add entry: %v to ip set: %s, error: %v", entry, set.Name, err)
} else {
glog.V(3).Infof("Successfully add entry: %v to ip set: %s", entry, set.Name)
klog.V(3).Infof("Successfully add entry: %v to ip set: %s", entry, set.Name)
}
}
}
@ -153,7 +159,7 @@ func (set *IPSet) syncIPSetEntries() {
func ensureIPSet(set *IPSet) error {
if err := set.handle.CreateSet(&set.IPSet, true); err != nil {
glog.Errorf("Failed to make sure ip set: %v exist, error: %v", set, err)
klog.Errorf("Failed to make sure ip set: %v exist, error: %v", set, err)
return err
}
return nil
@ -163,13 +169,13 @@ func ensureIPSet(set *IPSet) error {
func checkMinVersion(vstring string) bool {
version, err := utilversion.ParseGeneric(vstring)
if err != nil {
glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err)
klog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err)
return false
}
minVersion, err := utilversion.ParseGeneric(MinIPSetCheckVersion)
if err != nil {
glog.Errorf("MinCheckVersion (%s) is not a valid version string: %v", MinIPSetCheckVersion, err)
klog.Errorf("MinCheckVersion (%s) is not a valid version string: %v", MinIPSetCheckVersion, err)
return false
}
return !version.LessThan(minVersion)

View File

@ -179,6 +179,26 @@ func TestSyncIPSetEntries(t *testing.T) {
currentEntries: []string{"80", "9090", "8081", "8082"},
expectedEntries: []string{"8080"},
},
{ // case 12
set: &utilipset.IPSet{
Name: "sctp-1",
},
setType: utilipset.HashIPPort,
ipv6: false,
activeEntries: []string{"172.17.0.4,sctp:80"},
currentEntries: nil,
expectedEntries: []string{"172.17.0.4,sctp:80"},
},
{ // case 13
set: &utilipset.IPSet{
Name: "sctp-2",
},
setType: utilipset.HashIPPort,
ipv6: true,
activeEntries: []string{"FE80::0202:B3FF:FE1E:8329,sctp:80"},
currentEntries: []string{"FE80::0202:B3FF:FE1E:8329,sctp:80"},
expectedEntries: []string{"FE80::0202:B3FF:FE1E:8329,sctp:80"},
},
}
for i := range testCases {

View File

@ -30,7 +30,9 @@ type NetLinkHandle interface {
EnsureDummyDevice(devName string) (exist bool, err error)
// DeleteDummyDevice deletes the given dummy device by name.
DeleteDummyDevice(devName string) error
// GetLocalAddresses returns all unique local type IP addresses based on filter device interface. If filter device is not given,
// it will list all unique local type addresses.
GetLocalAddresses(filterDev string) (sets.String, error)
// ListBindAddress will list all IP addresses which are bound in a given interface
ListBindAddress(devName string) ([]string, error)
// GetLocalAddresses returns all unique local type IP addresses based on the specified device and filter device.
// If the device is not specified, it will list all unique local type addresses except the filter device's addresses.
GetLocalAddresses(dev, filterDev string) (sets.String, error)
}

View File

@ -105,8 +105,25 @@ func (h *netlinkHandle) DeleteDummyDevice(devName string) error {
return h.LinkDel(dummy)
}
// ListBindAddress will list all IP addresses which are bound in a given interface
func (h *netlinkHandle) ListBindAddress(devName string) ([]string, error) {
dev, err := h.LinkByName(devName)
if err != nil {
return nil, fmt.Errorf("error get interface: %s, err: %v", devName, err)
}
addrs, err := h.AddrList(dev, 0)
if err != nil {
return nil, fmt.Errorf("error list bound address of interface: %s, err: %v", devName, err)
}
var ips []string
for _, addr := range addrs {
ips = append(ips, addr.IP.String())
}
return ips, nil
}
// GetLocalAddresses lists all LOCAL type IP addresses from host based on filter device.
// If filter device is not specified, it's equivalent to exec:
// If dev is not specified, it's equivalent to exec:
// $ ip route show table local type local proto kernel
// 10.0.0.1 dev kube-ipvs0 scope host src 10.0.0.1
// 10.0.0.10 dev kube-ipvs0 scope host src 10.0.0.10
@ -119,20 +136,28 @@ func (h *netlinkHandle) DeleteDummyDevice(devName string) error {
// Then cut the unique src IP fields,
// --> result set: [10.0.0.1, 10.0.0.10, 10.0.0.252, 100.106.89.164, 127.0.0.1, 192.168.122.1]
// If filter device is specified, it's equivalent to exec:
// If dev is specified, it's equivalent to exec:
// $ ip route show table local type local proto kernel dev kube-ipvs0
// 10.0.0.1 scope host src 10.0.0.1
// 10.0.0.10 scope host src 10.0.0.10
// Then cut the unique src IP fields,
// --> result set: [10.0.0.1, 10.0.0.10]
func (h *netlinkHandle) GetLocalAddresses(filterDev string) (sets.String, error) {
linkIndex := -1
if len(filterDev) != 0 {
// If filterDev is specified, the result will discard routes of the specified device and cut src from the other routes.
func (h *netlinkHandle) GetLocalAddresses(dev, filterDev string) (sets.String, error) {
chosenLinkIndex, filterLinkIndex := -1, -1
if dev != "" {
link, err := h.LinkByName(dev)
if err != nil {
return nil, fmt.Errorf("error get device %s, err: %v", filterDev, err)
}
chosenLinkIndex = link.Attrs().Index
} else if filterDev != "" {
link, err := h.LinkByName(filterDev)
if err != nil {
return nil, fmt.Errorf("error get filter device %s, err: %v", filterDev, err)
}
linkIndex = link.Attrs().Index
filterLinkIndex = link.Attrs().Index
}
routeFilter := &netlink.Route{
@ -142,18 +167,20 @@ func (h *netlinkHandle) GetLocalAddresses(filterDev string) (sets.String, error)
}
filterMask := netlink.RT_FILTER_TABLE | netlink.RT_FILTER_TYPE | netlink.RT_FILTER_PROTOCOL
// find filter device
if linkIndex != -1 {
routeFilter.LinkIndex = linkIndex
// find chosen device
if chosenLinkIndex != -1 {
routeFilter.LinkIndex = chosenLinkIndex
filterMask |= netlink.RT_FILTER_OIF
}
routes, err := h.RouteListFiltered(netlink.FAMILY_ALL, routeFilter, filterMask)
if err != nil {
return nil, fmt.Errorf("error list route table, err: %v", err)
}
res := sets.NewString()
for _, route := range routes {
if route.LinkIndex == filterLinkIndex {
continue
}
if route.Src != nil {
res.Insert(route.Src.String())
}
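To make the new two-parameter semantics concrete, here is a short, hypothetical sketch of both lookup modes; `h` is any `NetLinkHandle` implementation and `kube-ipvs0` is the dummy device named in the comments above:

```go
package ipvs

import "k8s.io/klog"

// exampleLocalAddresses is illustrative only and not part of this commit.
func exampleLocalAddresses(h NetLinkHandle) {
	// Addresses bound to the dummy device only, i.e. roughly
	// `ip route show table local type local proto kernel dev kube-ipvs0`.
	bound, err := h.GetLocalAddresses("kube-ipvs0", "")
	if err != nil {
		klog.Errorf("Failed to list dummy device addresses: %v", err)
		return
	}

	// All local addresses except those on the dummy device.
	nodeIPs, err := h.GetLocalAddresses("", "kube-ipvs0")
	if err != nil {
		klog.Errorf("Failed to list node addresses: %v", err)
		return
	}
	klog.V(5).Infof("bound: %v, node IPs: %v", bound.List(), nodeIPs.List())
}
```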

View File

@ -52,7 +52,12 @@ func (h *emptyHandle) DeleteDummyDevice(devName string) error {
return fmt.Errorf("netlink is not supported in this platform")
}
// GetLocalAddresses is part of interface.
func (h *emptyHandle) GetLocalAddresses(filterDev string) (sets.String, error) {
// ListBindAddress is part of interface.
func (h *emptyHandle) ListBindAddress(devName string) ([]string, error) {
return nil, fmt.Errorf("netlink is not supported in this platform")
}
// GetLocalAddresses is part of interface.
func (h *emptyHandle) GetLocalAddresses(dev, filterDev string) (sets.String, error) {
return nil, fmt.Errorf("netlink is not supported in this platform")
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -18,7 +18,7 @@ go_library(
tags = ["automanaged"],
deps = [
"//pkg/util/ipset:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)
@ -39,5 +39,5 @@ go_test(
name = "go_default_test",
srcs = ["fake_test.go"],
embed = [":go_default_library"],
deps = ["//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library"],
deps = ["//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library"],
)

View File

@ -39,36 +39,93 @@ func NewFakeNetlinkHandle() *FakeNetlinkHandle {
// EnsureAddressBind is a mock implementation
func (h *FakeNetlinkHandle) EnsureAddressBind(address, devName string) (exist bool, err error) {
if len(devName) == 0 {
return false, fmt.Errorf("Device name can't be empty")
}
if _, ok := h.localAddresses[devName]; !ok {
return false, fmt.Errorf("Error bind address: %s to a non-exist interface: %s", address, devName)
}
for _, addr := range h.localAddresses[devName] {
if addr == address {
// return true if the address is already bound to device
return true, nil
}
}
h.localAddresses[devName] = append(h.localAddresses[devName], address)
return false, nil
}
// UnbindAddress is a mock implementation
func (h *FakeNetlinkHandle) UnbindAddress(address, devName string) error {
return nil
if len(devName) == 0 {
return fmt.Errorf("Device name can't be empty")
}
if _, ok := h.localAddresses[devName]; !ok {
return fmt.Errorf("Error unbind address: %s from a non-exist interface: %s", address, devName)
}
for i, addr := range h.localAddresses[devName] {
if addr == address {
// delete address from slice h.localAddresses[devName]
h.localAddresses[devName] = append(h.localAddresses[devName][:i], h.localAddresses[devName][i+1:]...)
return nil
}
}
// return error message if address is not found in slice h.localAddresses[devName]
return fmt.Errorf("Address: %s is not found in interface: %s", address, devName)
}
// EnsureDummyDevice is a mock implementation
func (h *FakeNetlinkHandle) EnsureDummyDevice(devName string) (bool, error) {
return false, nil
if len(devName) == 0 {
return false, fmt.Errorf("Device name can't be empty")
}
if _, ok := h.localAddresses[devName]; !ok {
// create dummy interface if devName is not found in localAddress map
h.localAddresses[devName] = make([]string, 0)
return false, nil
}
// return true if devName is already created in localAddress map
return true, nil
}
// DeleteDummyDevice is a mock implementation
func (h *FakeNetlinkHandle) DeleteDummyDevice(devName string) error {
if len(devName) == 0 {
return fmt.Errorf("Device name can't be empty")
}
if _, ok := h.localAddresses[devName]; !ok {
return fmt.Errorf("Error deleting a non-exist interface: %s", devName)
}
delete(h.localAddresses, devName)
return nil
}
// ListBindAddress is a mock implementation
func (h *FakeNetlinkHandle) ListBindAddress(devName string) ([]string, error) {
if len(devName) == 0 {
return nil, fmt.Errorf("Device name can't be empty")
}
if _, ok := h.localAddresses[devName]; !ok {
return nil, fmt.Errorf("Error list addresses from a non-exist interface: %s", devName)
}
return h.localAddresses[devName], nil
}
// GetLocalAddresses is a mock implementation
func (h *FakeNetlinkHandle) GetLocalAddresses(filterDev string) (sets.String, error) {
func (h *FakeNetlinkHandle) GetLocalAddresses(dev, filterDev string) (sets.String, error) {
res := sets.NewString()
if len(filterDev) != 0 {
if len(dev) != 0 {
// list all addresses from a given network interface.
for _, addr := range h.localAddresses[filterDev] {
for _, addr := range h.localAddresses[dev] {
res.Insert(addr)
}
return res, nil
}
// If dev is not given, list all addresses from all available network interfaces, skipping filterDev.
for linkName := range h.localAddresses {
if linkName == filterDev {
continue
}
// list all addresses from a given network interface.
for _, addr := range h.localAddresses[linkName] {
res.Insert(addr)

View File

@ -27,21 +27,21 @@ func TestSetGetLocalAddresses(t *testing.T) {
fake := NewFakeNetlinkHandle()
fake.SetLocalAddresses("eth0", "1.2.3.4")
expected := sets.NewString("1.2.3.4")
addr, _ := fake.GetLocalAddresses("eth0")
addr, _ := fake.GetLocalAddresses("eth0", "")
if !reflect.DeepEqual(expected, addr) {
t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, addr)
}
list, _ := fake.GetLocalAddresses("")
list, _ := fake.GetLocalAddresses("", "")
if !reflect.DeepEqual(expected, list) {
t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, list)
}
fake.SetLocalAddresses("lo", "127.0.0.1")
expected = sets.NewString("127.0.0.1")
addr, _ = fake.GetLocalAddresses("lo")
addr, _ = fake.GetLocalAddresses("lo", "")
if !reflect.DeepEqual(expected, addr) {
t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, addr)
}
list, _ = fake.GetLocalAddresses("")
list, _ = fake.GetLocalAddresses("", "")
expected = sets.NewString("1.2.3.4", "127.0.0.1")
if !reflect.DeepEqual(expected, list) {
t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, list)

View File

@ -23,13 +23,13 @@ import (
"strings"
"sync"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
apiservice "k8s.io/kubernetes/pkg/api/service"
api "k8s.io/kubernetes/pkg/apis/core"
apiservice "k8s.io/kubernetes/pkg/api/v1/service"
utilproxy "k8s.io/kubernetes/pkg/proxy/util"
utilnet "k8s.io/kubernetes/pkg/util/net"
)
@ -41,10 +41,10 @@ import (
type BaseServiceInfo struct {
ClusterIP net.IP
Port int
Protocol api.Protocol
Protocol v1.Protocol
NodePort int
LoadBalancerStatus api.LoadBalancerStatus
SessionAffinityType api.ServiceAffinity
LoadBalancerStatus v1.LoadBalancerStatus
SessionAffinityType v1.ServiceAffinity
StickyMaxAgeSeconds int
ExternalIPs []string
LoadBalancerSourceRanges []string
@ -65,7 +65,7 @@ func (info *BaseServiceInfo) ClusterIPString() string {
}
// GetProtocol is part of ServicePort interface.
func (info *BaseServiceInfo) GetProtocol() api.Protocol {
func (info *BaseServiceInfo) GetProtocol() v1.Protocol {
return info.Protocol
}
@ -74,13 +74,13 @@ func (info *BaseServiceInfo) GetHealthCheckNodePort() int {
return info.HealthCheckNodePort
}
func (sct *ServiceChangeTracker) newBaseServiceInfo(port *api.ServicePort, service *api.Service) *BaseServiceInfo {
func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, service *v1.Service) *BaseServiceInfo {
onlyNodeLocalEndpoints := false
if apiservice.RequestsOnlyLocalTraffic(service) {
onlyNodeLocalEndpoints = true
}
var stickyMaxAgeSeconds int
if service.Spec.SessionAffinity == api.ServiceAffinityClientIP {
if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP {
// Kube-apiserver side guarantees SessionAffinityConfig won't be nil when session affinity type is ClientIP
stickyMaxAgeSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
}
@ -119,7 +119,7 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *api.ServicePort, servi
if apiservice.NeedsHealthCheck(service) {
p := service.Spec.HealthCheckNodePort
if p == 0 {
glog.Errorf("Service %s/%s has no healthcheck nodeport", service.Namespace, service.Name)
klog.Errorf("Service %s/%s has no healthcheck nodeport", service.Namespace, service.Name)
} else {
info.HealthCheckNodePort = int(p)
}
@ -128,7 +128,7 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *api.ServicePort, servi
return info
}
type makeServicePortFunc func(*api.ServicePort, *api.Service, *BaseServiceInfo) ServicePort
type makeServicePortFunc func(*v1.ServicePort, *v1.Service, *BaseServiceInfo) ServicePort
// serviceChange contains all changes to services that happened since proxy rules were synced. For a single object,
// changes are accumulated, i.e. previous is state from before applying the changes,
@ -170,7 +170,7 @@ func NewServiceChangeTracker(makeServiceInfo makeServicePortFunc, isIPv6Mode *bo
// - pass <oldService, service> as the <previous, current> pair.
// Delete item
// - pass <service, nil> as the <previous, current> pair.
func (sct *ServiceChangeTracker) Update(previous, current *api.Service) bool {
func (sct *ServiceChangeTracker) Update(previous, current *v1.Service) bool {
svc := current
if svc == nil {
svc = previous
@ -231,7 +231,7 @@ type ServiceMap map[ServicePortName]ServicePort
// serviceToServiceMap translates a single Service object to a ServiceMap.
//
// NOTE: service object should NOT be modified.
func (sct *ServiceChangeTracker) serviceToServiceMap(service *api.Service) ServiceMap {
func (sct *ServiceChangeTracker) serviceToServiceMap(service *v1.Service) ServiceMap {
if service == nil {
return nil
}
@ -306,9 +306,9 @@ func (sm *ServiceMap) merge(other ServiceMap) sets.String {
existingPorts.Insert(svcPortName.String())
_, exists := (*sm)[svcPortName]
if !exists {
glog.V(1).Infof("Adding new service port %q at %s", svcPortName, info.String())
klog.V(1).Infof("Adding new service port %q at %s", svcPortName, info.String())
} else {
glog.V(1).Infof("Updating existing service port %q at %s", svcPortName, info.String())
klog.V(1).Infof("Updating existing service port %q at %s", svcPortName, info.String())
}
(*sm)[svcPortName] = info
}
@ -331,13 +331,13 @@ func (sm *ServiceMap) unmerge(other ServiceMap, UDPStaleClusterIP sets.String) {
for svcPortName := range other {
info, exists := (*sm)[svcPortName]
if exists {
glog.V(1).Infof("Removing service port %q", svcPortName)
if info.GetProtocol() == api.ProtocolUDP {
klog.V(1).Infof("Removing service port %q", svcPortName)
if info.GetProtocol() == v1.ProtocolUDP {
UDPStaleClusterIP.Insert(info.ClusterIPString())
}
delete(*sm, svcPortName)
} else {
glog.Errorf("Service port %q doesn't exists", svcPortName)
klog.Errorf("Service port %q doesn't exists", svcPortName)
}
}
}
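The `<previous, current>` convention documented on `Update` above can be summarized with a short, hypothetical sketch (the helper and its arguments are illustrative only):

```go
package proxy

import "k8s.io/api/core/v1"

// exampleTrackChanges demonstrates the Update call pattern; sct is an
// existing tracker and oldSvc/newSvc are two versions of the same Service.
func exampleTrackChanges(sct *ServiceChangeTracker, oldSvc, newSvc *v1.Service) {
	sct.Update(nil, newSvc)    // add:    <nil, service>
	sct.Update(oldSvc, newSvc) // update: <oldService, service>
	sct.Update(newSvc, nil)    // delete: <service, nil>
}
```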

View File

@ -22,11 +22,11 @@ import (
"github.com/davecgh/go-spew/spew"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
api "k8s.io/kubernetes/pkg/apis/core"
)
const testHostname = "test-hostname"
@ -35,7 +35,7 @@ func makeTestServiceInfo(clusterIP string, port int, protocol string, healthchec
info := &BaseServiceInfo{
ClusterIP: net.ParseIP(clusterIP),
Port: port,
Protocol: api.Protocol(protocol),
Protocol: v1.Protocol(protocol),
}
if healthcheckNodePort != 0 {
info.HealthCheckNodePort = healthcheckNodePort
@ -46,22 +46,22 @@ func makeTestServiceInfo(clusterIP string, port int, protocol string, healthchec
return info
}
func makeTestService(namespace, name string, svcFunc func(*api.Service)) *api.Service {
svc := &api.Service{
func makeTestService(namespace, name string, svcFunc func(*v1.Service)) *v1.Service {
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{},
},
Spec: api.ServiceSpec{},
Status: api.ServiceStatus{},
Spec: v1.ServiceSpec{},
Status: v1.ServiceStatus{},
}
svcFunc(svc)
return svc
}
func addTestPort(array []api.ServicePort, name string, protocol api.Protocol, port, nodeport int32, targetPort int) []api.ServicePort {
svcPort := api.ServicePort{
func addTestPort(array []v1.ServicePort, name string, protocol v1.Protocol, port, nodeport int32, targetPort int) []v1.ServicePort {
svcPort := v1.ServicePort{
Name: name,
Protocol: protocol,
Port: port,
@ -96,7 +96,7 @@ func TestServiceToServiceMap(t *testing.T) {
testCases := []struct {
desc string
service *api.Service
service *v1.Service
expected map[ServicePortName]*BaseServiceInfo
isIPv6Mode *bool
}{
@ -107,25 +107,34 @@ func TestServiceToServiceMap(t *testing.T) {
},
{
desc: "headless service",
service: makeTestService("ns2", "headless", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeClusterIP
svc.Spec.ClusterIP = api.ClusterIPNone
service: makeTestService("ns2", "headless", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP
svc.Spec.ClusterIP = v1.ClusterIPNone
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "rpc", "UDP", 1234, 0, 0)
}),
expected: map[ServicePortName]*BaseServiceInfo{},
},
{
desc: "headless sctp service",
service: makeTestService("ns2", "headless", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP
svc.Spec.ClusterIP = v1.ClusterIPNone
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "sip", "SCTP", 7777, 0, 0)
}),
expected: map[ServicePortName]*BaseServiceInfo{},
},
{
desc: "headless service without port",
service: makeTestService("ns2", "headless-without-port", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeClusterIP
svc.Spec.ClusterIP = api.ClusterIPNone
service: makeTestService("ns2", "headless-without-port", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP
svc.Spec.ClusterIP = v1.ClusterIPNone
}),
expected: map[ServicePortName]*BaseServiceInfo{},
},
{
desc: "cluster ip service",
service: makeTestService("ns2", "cluster-ip", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeClusterIP
service: makeTestService("ns2", "cluster-ip", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP
svc.Spec.ClusterIP = "172.16.55.4"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p1", "UDP", 1234, 4321, 0)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p2", "UDP", 1235, 5321, 0)
@ -137,8 +146,8 @@ func TestServiceToServiceMap(t *testing.T) {
},
{
desc: "nodeport service",
service: makeTestService("ns2", "node-port", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeNodePort
service: makeTestService("ns2", "node-port", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeNodePort
svc.Spec.ClusterIP = "172.16.55.10"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port1", "UDP", 345, 678, 0)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port2", "TCP", 344, 677, 0)
@ -150,14 +159,14 @@ func TestServiceToServiceMap(t *testing.T) {
},
{
desc: "load balancer service",
service: makeTestService("ns1", "load-balancer", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeLoadBalancer
service: makeTestService("ns1", "load-balancer", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer
svc.Spec.ClusterIP = "172.16.55.11"
svc.Spec.LoadBalancerIP = "5.6.7.8"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port3", "UDP", 8675, 30061, 7000)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port4", "UDP", 8676, 30062, 7001)
svc.Status.LoadBalancer = api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
svc.Status.LoadBalancer = v1.LoadBalancerStatus{
Ingress: []v1.LoadBalancerIngress{
{IP: "10.1.2.4"},
},
}
@ -169,18 +178,18 @@ func TestServiceToServiceMap(t *testing.T) {
},
{
desc: "load balancer service with only local traffic policy",
service: makeTestService("ns1", "only-local-load-balancer", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeLoadBalancer
service: makeTestService("ns1", "only-local-load-balancer", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer
svc.Spec.ClusterIP = "172.16.55.12"
svc.Spec.LoadBalancerIP = "5.6.7.8"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "portx", "UDP", 8677, 30063, 7002)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "porty", "UDP", 8678, 30064, 7003)
svc.Status.LoadBalancer = api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
svc.Status.LoadBalancer = v1.LoadBalancerStatus{
Ingress: []v1.LoadBalancerIngress{
{IP: "10.1.2.3"},
},
}
svc.Spec.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyTypeLocal
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
svc.Spec.HealthCheckNodePort = 345
}),
expected: map[ServicePortName]*BaseServiceInfo{
@ -190,8 +199,8 @@ func TestServiceToServiceMap(t *testing.T) {
},
{
desc: "external name service",
service: makeTestService("ns2", "external-name", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeExternalName
service: makeTestService("ns2", "external-name", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeExternalName
svc.Spec.ClusterIP = "172.16.55.4" // Should be ignored
svc.Spec.ExternalName = "foo2.bar.com"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "portz", "UDP", 1235, 5321, 0)
@ -200,18 +209,18 @@ func TestServiceToServiceMap(t *testing.T) {
},
{
desc: "service with ipv6 clusterIP under ipv4 mode, service should be filtered",
service: &api.Service{
service: &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "invalidIPv6InIPV4Mode",
Namespace: "test",
},
Spec: api.ServiceSpec{
Spec: v1.ServiceSpec{
ClusterIP: testClusterIPv6,
Ports: []api.ServicePort{
Ports: []v1.ServicePort{
{
Name: "testPort",
Port: int32(12345),
Protocol: api.ProtocolTCP,
Protocol: v1.ProtocolTCP,
},
},
},
@ -220,18 +229,18 @@ func TestServiceToServiceMap(t *testing.T) {
},
{
desc: "service with ipv4 clusterIP under ipv6 mode, service should be filtered",
service: &api.Service{
service: &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "invalidIPv4InIPV6Mode",
Namespace: "test",
},
Spec: api.ServiceSpec{
Spec: v1.ServiceSpec{
ClusterIP: testClusterIPv4,
Ports: []api.ServicePort{
Ports: []v1.ServicePort{
{
Name: "testPort",
Port: int32(12345),
Protocol: api.ProtocolTCP,
Protocol: v1.ProtocolTCP,
},
},
},
@ -240,20 +249,20 @@ func TestServiceToServiceMap(t *testing.T) {
},
{
desc: "service with ipv4 configurations under ipv4 mode",
service: &api.Service{
service: &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "validIPv4",
Namespace: "test",
},
Spec: api.ServiceSpec{
Spec: v1.ServiceSpec{
ClusterIP: testClusterIPv4,
ExternalIPs: []string{testExternalIPv4},
LoadBalancerSourceRanges: []string{testSourceRangeIPv4},
Ports: []api.ServicePort{
Ports: []v1.ServicePort{
{
Name: "testPort",
Port: int32(12345),
Protocol: api.ProtocolTCP,
Protocol: v1.ProtocolTCP,
},
},
},
@ -268,20 +277,20 @@ func TestServiceToServiceMap(t *testing.T) {
},
{
desc: "service with ipv6 configurations under ipv6 mode",
service: &api.Service{
service: &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "validIPv6",
Namespace: "test",
},
Spec: api.ServiceSpec{
Spec: v1.ServiceSpec{
ClusterIP: testClusterIPv6,
ExternalIPs: []string{testExternalIPv6},
LoadBalancerSourceRanges: []string{testSourceRangeIPv6},
Ports: []api.ServicePort{
Ports: []v1.ServicePort{
{
Name: "testPort",
Port: int32(12345),
Protocol: api.ProtocolTCP,
Protocol: v1.ProtocolTCP,
},
},
},
@ -296,20 +305,20 @@ func TestServiceToServiceMap(t *testing.T) {
},
{
desc: "service with both ipv4 and ipv6 configurations under ipv4 mode, ipv6 fields should be filtered",
service: &api.Service{
service: &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "filterIPv6InIPV4Mode",
Namespace: "test",
},
Spec: api.ServiceSpec{
Spec: v1.ServiceSpec{
ClusterIP: testClusterIPv4,
ExternalIPs: []string{testExternalIPv4, testExternalIPv6},
LoadBalancerSourceRanges: []string{testSourceRangeIPv4, testSourceRangeIPv6},
Ports: []api.ServicePort{
Ports: []v1.ServicePort{
{
Name: "testPort",
Port: int32(12345),
Protocol: api.ProtocolTCP,
Protocol: v1.ProtocolTCP,
},
},
},
@ -324,20 +333,20 @@ func TestServiceToServiceMap(t *testing.T) {
},
{
desc: "service with both ipv4 and ipv6 configurations under ipv6 mode, ipv4 fields should be filtered",
service: &api.Service{
service: &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "filterIPv4InIPV6Mode",
Namespace: "test",
},
Spec: api.ServiceSpec{
Spec: v1.ServiceSpec{
ClusterIP: testClusterIPv6,
ExternalIPs: []string{testExternalIPv4, testExternalIPv6},
LoadBalancerSourceRanges: []string{testSourceRangeIPv4, testSourceRangeIPv6},
Ports: []api.ServicePort{
Ports: []v1.ServicePort{
{
Name: "testPort",
Port: int32(12345),
Protocol: api.ProtocolTCP,
Protocol: v1.ProtocolTCP,
},
},
},
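Note: the six IP-family cases above all hinge on one predicate: a field is kept only when its address matches the IP family the proxier runs in. A minimal sketch of that check (helper name invented; the real filtering lives in the proxy code, not in this test file):

import "net"

// matchesIPFamily is a hypothetical illustration of the family check these
// cases exercise; it is not the actual implementation.
func matchesIPFamily(addr string, isIPv6Mode bool) bool {
	ip := net.ParseIP(addr)
	if ip == nil {
		return false // unparseable addresses are dropped
	}
	// For a valid IP, To4() == nil exactly when the address is IPv6.
	return (ip.To4() == nil) == isIPv6Mode
}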
@ -391,21 +400,21 @@ func newFakeProxier() *FakeProxier {
}
}
func makeServiceMap(fake *FakeProxier, allServices ...*api.Service) {
func makeServiceMap(fake *FakeProxier, allServices ...*v1.Service) {
for i := range allServices {
fake.addService(allServices[i])
}
}
func (fake *FakeProxier) addService(service *api.Service) {
func (fake *FakeProxier) addService(service *v1.Service) {
fake.serviceChanges.Update(nil, service)
}
func (fake *FakeProxier) updateService(oldService *api.Service, service *api.Service) {
func (fake *FakeProxier) updateService(oldService *v1.Service, service *v1.Service) {
fake.serviceChanges.Update(oldService, service)
}
func (fake *FakeProxier) deleteService(service *api.Service) {
func (fake *FakeProxier) deleteService(service *v1.Service) {
fake.serviceChanges.Update(service, nil)
}
@ -413,14 +422,14 @@ func TestUpdateServiceMapHeadless(t *testing.T) {
fp := newFakeProxier()
makeServiceMap(fp,
makeTestService("ns2", "headless", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeClusterIP
svc.Spec.ClusterIP = api.ClusterIPNone
makeTestService("ns2", "headless", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP
svc.Spec.ClusterIP = v1.ClusterIPNone
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "rpc", "UDP", 1234, 0, 0)
}),
makeTestService("ns2", "headless-without-port", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeClusterIP
svc.Spec.ClusterIP = api.ClusterIPNone
makeTestService("ns2", "headless-without-port", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP
svc.Spec.ClusterIP = v1.ClusterIPNone
}),
)
@ -444,8 +453,8 @@ func TestUpdateServiceTypeExternalName(t *testing.T) {
fp := newFakeProxier()
makeServiceMap(fp,
makeTestService("ns2", "external-name", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeExternalName
makeTestService("ns2", "external-name", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeExternalName
svc.Spec.ClusterIP = "172.16.55.4" // Should be ignored
svc.Spec.ExternalName = "foo2.bar.com"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blah", "UDP", 1235, 5321, 0)
@ -468,43 +477,43 @@ func TestUpdateServiceTypeExternalName(t *testing.T) {
func TestBuildServiceMapAddRemove(t *testing.T) {
fp := newFakeProxier()
services := []*api.Service{
makeTestService("ns2", "cluster-ip", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeClusterIP
services := []*v1.Service{
makeTestService("ns2", "cluster-ip", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP
svc.Spec.ClusterIP = "172.16.55.4"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port1", "UDP", 1234, 4321, 0)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port2", "UDP", 1235, 5321, 0)
}),
makeTestService("ns2", "node-port", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeNodePort
makeTestService("ns2", "node-port", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeNodePort
svc.Spec.ClusterIP = "172.16.55.10"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port1", "UDP", 345, 678, 0)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "port2", "TCP", 344, 677, 0)
}),
makeTestService("ns1", "load-balancer", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeLoadBalancer
makeTestService("ns1", "load-balancer", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer
svc.Spec.ClusterIP = "172.16.55.11"
svc.Spec.LoadBalancerIP = "5.6.7.8"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar", "UDP", 8675, 30061, 7000)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8676, 30062, 7001)
svc.Status.LoadBalancer = api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
svc.Status.LoadBalancer = v1.LoadBalancerStatus{
Ingress: []v1.LoadBalancerIngress{
{IP: "10.1.2.4"},
},
}
}),
makeTestService("ns1", "only-local-load-balancer", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeLoadBalancer
makeTestService("ns1", "only-local-load-balancer", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer
svc.Spec.ClusterIP = "172.16.55.12"
svc.Spec.LoadBalancerIP = "5.6.7.8"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar2", "UDP", 8677, 30063, 7002)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8678, 30064, 7003)
svc.Status.LoadBalancer = api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
svc.Status.LoadBalancer = v1.LoadBalancerStatus{
Ingress: []v1.LoadBalancerIngress{
{IP: "10.1.2.3"},
},
}
svc.Spec.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyTypeLocal
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
svc.Spec.HealthCheckNodePort = 345
}),
}
@ -534,8 +543,8 @@ func TestBuildServiceMapAddRemove(t *testing.T) {
// Remove some stuff
// oneService is a modification of services[0] with removed first port.
oneService := makeTestService("ns2", "cluster-ip", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeClusterIP
oneService := makeTestService("ns2", "cluster-ip", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP
svc.Spec.ClusterIP = "172.16.55.4"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p2", "UDP", 1235, 5321, 0)
})
@ -571,24 +580,24 @@ func TestBuildServiceMapAddRemove(t *testing.T) {
func TestBuildServiceMapServiceUpdate(t *testing.T) {
fp := newFakeProxier()
servicev1 := makeTestService("ns1", "svc1", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeClusterIP
servicev1 := makeTestService("ns1", "svc1", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP
svc.Spec.ClusterIP = "172.16.55.4"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p1", "UDP", 1234, 4321, 0)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p2", "TCP", 1235, 5321, 0)
})
servicev2 := makeTestService("ns1", "svc1", func(svc *api.Service) {
svc.Spec.Type = api.ServiceTypeLoadBalancer
servicev2 := makeTestService("ns1", "svc1", func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer
svc.Spec.ClusterIP = "172.16.55.4"
svc.Spec.LoadBalancerIP = "5.6.7.8"
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p1", "UDP", 1234, 4321, 7002)
svc.Spec.Ports = addTestPort(svc.Spec.Ports, "p2", "TCP", 1235, 5321, 7003)
svc.Status.LoadBalancer = api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
svc.Status.LoadBalancer = v1.LoadBalancerStatus{
Ingress: []v1.LoadBalancerIngress{
{IP: "10.1.2.3"},
},
}
svc.Spec.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyTypeLocal
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
svc.Spec.HealthCheckNodePort = 345
})
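Note: the fixture helpers called throughout this test only change their parameter types in this migration. A plausible reconstruction, assuming the upstream signatures (details may differ in the actual file):

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// Plausible shape of the helpers after the api -> v1 switch.
func makeTestService(namespace, name string, svcFunc func(*v1.Service)) *v1.Service {
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
	}
	svcFunc(svc) // the caller fills in Spec and Status
	return svc
}

func addTestPort(ports []v1.ServicePort, name string, protocol v1.Protocol, port, nodePort int32, targetPort int) []v1.ServicePort {
	return append(ports, v1.ServicePort{
		Name:       name,
		Protocol:   protocol,
		Port:       port,
		NodePort:   nodePort,
		TargetPort: intstr.FromInt(targetPort),
	})
}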


@ -19,8 +19,8 @@ package proxy
import (
"fmt"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
api "k8s.io/kubernetes/pkg/apis/core"
)
// ProxyProvider is the interface provided by proxier implementations.
@ -51,7 +51,7 @@ type ServicePort interface {
// ClusterIPString returns service cluster IP in string format.
ClusterIPString() string
// GetProtocol returns service protocol.
GetProtocol() api.Protocol
GetProtocol() v1.Protocol
// GetHealthCheckNodePort returns service health check node port if present. If return 0, it means not present.
GetHealthCheckNodePort() int
}
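Note: only the protocol accessor changes type here. A minimal sketch of a type satisfying the three methods visible in this hunk (the full ServicePort interface carries more):

// stubPort is illustrative only and covers just the methods shown above.
type stubPort struct {
	clusterIP net.IP
	protocol  v1.Protocol
	hcPort    int
}

func (s *stubPort) ClusterIPString() string     { return s.clusterIP.String() }
func (s *stubPort) GetProtocol() v1.Protocol    { return s.protocol }
func (s *stubPort) GetHealthCheckNodePort() int { return s.hcPort }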


@ -13,60 +13,26 @@ go_library(
"port_allocator.go",
"proxier.go",
"proxysocket.go",
"rlimit.go",
"rlimit_windows.go",
"roundrobin.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"rlimit.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"rlimit_windows.go",
],
"//conditions:default": [],
}),
],
importpath = "k8s.io/kubernetes/pkg/proxy/userspace",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/helper:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/util:go_default_library",
"//pkg/util/conntrack:go_default_library",
"//pkg/util/iptables:go_default_library",
"//pkg/util/slice:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:android": [
@ -112,13 +78,13 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/util/iptables/testing:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],


@ -17,7 +17,7 @@ limitations under the License.
package userspace
import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/proxy"
"net"
)
@ -27,7 +27,7 @@ type LoadBalancer interface {
// NextEndpoint returns the endpoint to handle a request for the given
// service-port and source address.
NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error)
NewService(service proxy.ServicePortName, sessionAffinityType api.ServiceAffinity, stickyMaxAgeSeconds int) error
NewService(service proxy.ServicePortName, sessionAffinityType v1.ServiceAffinity, stickyMaxAgeSeconds int) error
DeleteService(service proxy.ServicePortName)
CleanupStaleStickySessions(service proxy.ServicePortName)
ServiceHasEndpoints(service proxy.ServicePortName) bool
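Note: NewService now takes a v1.ServiceAffinity. A hedged usage sketch against the round-robin implementation the tests below construct (the 10800-second sticky age is an example value, not a default from this file):

lb := NewLoadBalancerRR()
name := proxy.ServicePortName{
	NamespacedName: types.NamespacedName{Namespace: "ns", Name: "svc"},
	Port:           "p",
}
if err := lb.NewService(name, v1.ServiceAffinityClientIP, 10800); err != nil {
	// registration failed; handle or log
}
defer lb.DeleteService(name)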


@ -25,17 +25,15 @@ import (
"sync/atomic"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilnet "k8s.io/apimachinery/pkg/util/net"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
"k8s.io/kubernetes/pkg/proxy"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/proxy"
utilproxy "k8s.io/kubernetes/pkg/proxy/util"
"k8s.io/kubernetes/pkg/util/conntrack"
"k8s.io/kubernetes/pkg/util/iptables"
@ -57,12 +55,12 @@ type ServiceInfo struct {
isAliveAtomic int32 // Only access this with atomic ops
portal portal
protocol api.Protocol
protocol v1.Protocol
proxyPort int
socket ProxySocket
nodePort int
loadBalancerStatus api.LoadBalancerStatus
sessionAffinityType api.ServiceAffinity
loadBalancerStatus v1.LoadBalancerStatus
sessionAffinityType v1.ServiceAffinity
stickyMaxAgeSeconds int
// Deprecated, but required for back-compat (including e2e)
externalIPs []string
@ -83,7 +81,7 @@ func (info *ServiceInfo) IsAlive() bool {
func logTimeout(err error) bool {
if e, ok := err.(net.Error); ok {
if e.Timeout() {
glog.V(3).Infof("connection to endpoint closed due to inactivity")
klog.V(3).Infof("connection to endpoint closed due to inactivity")
return true
}
}
@ -91,7 +89,7 @@ func logTimeout(err error) bool {
}
// ProxySocketFunc is a function which constructs a ProxySocket from a protocol, ip, and port
type ProxySocketFunc func(protocol api.Protocol, ip net.IP, port int) (ProxySocket, error)
type ProxySocketFunc func(protocol v1.Protocol, ip net.IP, port int) (ProxySocket, error)
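Note: since ProxySocketFunc is a plain function type, socket constructors compose. An illustrative decorator (the wrapper name is invented; the inner func would be whatever constructor the proxier was built with):

// withSocketLogging is hypothetical; it wraps any ProxySocketFunc with two
// log lines and changes nothing else.
func withSocketLogging(inner ProxySocketFunc) ProxySocketFunc {
	return func(protocol v1.Protocol, ip net.IP, port int) (ProxySocket, error) {
		sock, err := inner(protocol, ip, port)
		if err != nil {
			klog.Errorf("failed to open %s proxy socket on %s:%d: %v", protocol, ip, port, err)
			return nil, err
		}
		klog.V(4).Infof("opened %s proxy socket on %s:%d", protocol, ip, port)
		return sock, nil
	}
}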
// Proxier is a simple proxy for TCP connections between a localhost:lport
// and services that provide the actual implementations.
@ -121,7 +119,7 @@ var _ proxy.ProxyProvider = &Proxier{}
type portMapKey struct {
ip string
port int
protocol api.Protocol
protocol v1.Protocol
}
func (k *portMapKey) String() string {
@ -186,7 +184,7 @@ func NewCustomProxier(loadBalancer LoadBalancer, listenIP net.IP, iptables iptab
proxyPorts := newPortAllocator(pr)
glog.V(2).Infof("Setting proxy IP to %v and initializing iptables", hostIP)
klog.V(2).Infof("Setting proxy IP to %v and initializing iptables", hostIP)
return createProxier(loadBalancer, listenIP, iptables, exec, hostIP, proxyPorts, syncPeriod, minSyncPeriod, udpIdleTimeout, makeProxySocket)
}
@ -231,13 +229,13 @@ func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) {
args := []string{"-m", "comment", "--comment", "handle ClusterIPs; NOTE: this must be before the NodePort rules"}
if err := ipt.DeleteRule(iptables.TableNAT, iptables.ChainOutput, append(args, "-j", string(iptablesHostPortalChain))...); err != nil {
if !iptables.IsNotFoundError(err) {
glog.Errorf("Error removing userspace rule: %v", err)
klog.Errorf("Error removing userspace rule: %v", err)
encounteredError = true
}
}
if err := ipt.DeleteRule(iptables.TableNAT, iptables.ChainPrerouting, append(args, "-j", string(iptablesContainerPortalChain))...); err != nil {
if !iptables.IsNotFoundError(err) {
glog.Errorf("Error removing userspace rule: %v", err)
klog.Errorf("Error removing userspace rule: %v", err)
encounteredError = true
}
}
@ -245,20 +243,20 @@ func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) {
args = append(args, "-m", "comment", "--comment", "handle service NodePorts; NOTE: this must be the last rule in the chain")
if err := ipt.DeleteRule(iptables.TableNAT, iptables.ChainOutput, append(args, "-j", string(iptablesHostNodePortChain))...); err != nil {
if !iptables.IsNotFoundError(err) {
glog.Errorf("Error removing userspace rule: %v", err)
klog.Errorf("Error removing userspace rule: %v", err)
encounteredError = true
}
}
if err := ipt.DeleteRule(iptables.TableNAT, iptables.ChainPrerouting, append(args, "-j", string(iptablesContainerNodePortChain))...); err != nil {
if !iptables.IsNotFoundError(err) {
glog.Errorf("Error removing userspace rule: %v", err)
klog.Errorf("Error removing userspace rule: %v", err)
encounteredError = true
}
}
args = []string{"-m", "comment", "--comment", "Ensure that non-local NodePort traffic can flow"}
if err := ipt.DeleteRule(iptables.TableFilter, iptables.ChainInput, append(args, "-j", string(iptablesNonLocalNodePortChain))...); err != nil {
if !iptables.IsNotFoundError(err) {
glog.Errorf("Error removing userspace rule: %v", err)
klog.Errorf("Error removing userspace rule: %v", err)
encounteredError = true
}
}
@ -273,13 +271,13 @@ func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) {
// flush the chain first, then delete it only if the flush succeeded; deleting a non-flushed chain would fail anyway.
if err := ipt.FlushChain(table, c); err != nil {
if !iptables.IsNotFoundError(err) {
glog.Errorf("Error flushing userspace chain: %v", err)
klog.Errorf("Error flushing userspace chain: %v", err)
encounteredError = true
}
} else {
if err = ipt.DeleteChain(table, c); err != nil {
if !iptables.IsNotFoundError(err) {
glog.Errorf("Error deleting userspace chain: %v", err)
klog.Errorf("Error deleting userspace chain: %v", err)
encounteredError = true
}
}
@ -292,7 +290,7 @@ func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) {
// Sync is called to immediately synchronize the proxier state to iptables
func (proxier *Proxier) Sync() {
if err := iptablesInit(proxier.iptables); err != nil {
glog.Errorf("Failed to ensure iptables: %v", err)
klog.Errorf("Failed to ensure iptables: %v", err)
}
proxier.ensurePortals()
proxier.cleanupStaleStickySessions()
@ -304,7 +302,7 @@ func (proxier *Proxier) SyncLoop() {
defer t.Stop()
for {
<-t.C
glog.V(6).Infof("Periodic sync")
klog.V(6).Infof("Periodic sync")
proxier.Sync()
}
}
@ -317,7 +315,7 @@ func (proxier *Proxier) ensurePortals() {
for name, info := range proxier.serviceMap {
err := proxier.openPortal(name, info)
if err != nil {
glog.Errorf("Failed to ensure portal for %q: %v", name, err)
klog.Errorf("Failed to ensure portal for %q: %v", name, err)
}
}
}
@ -364,7 +362,7 @@ func (proxier *Proxier) setServiceInfo(service proxy.ServicePortName, info *Serv
// addServiceOnPort starts listening for a new service, returning the ServiceInfo.
// Pass proxyPort=0 to allocate a random port. The timeout only applies to UDP
// connections, for now.
func (proxier *Proxier) addServiceOnPort(service proxy.ServicePortName, protocol api.Protocol, proxyPort int, timeout time.Duration) (*ServiceInfo, error) {
func (proxier *Proxier) addServiceOnPort(service proxy.ServicePortName, protocol v1.Protocol, proxyPort int, timeout time.Duration) (*ServiceInfo, error) {
sock, err := proxier.makeProxySocket(protocol, proxier.listenIP, proxyPort)
if err != nil {
return nil, err
@ -386,11 +384,11 @@ func (proxier *Proxier) addServiceOnPort(service proxy.ServicePortName, protocol
proxyPort: portNum,
protocol: protocol,
socket: sock,
sessionAffinityType: api.ServiceAffinityNone, // default
sessionAffinityType: v1.ServiceAffinityNone, // default
}
proxier.setServiceInfo(service, si)
glog.V(2).Infof("Proxying for service %q on %s port %d", service, protocol, portNum)
klog.V(2).Infof("Proxying for service %q on %s port %d", service, protocol, portNum)
go func(service proxy.ServicePortName, proxier *Proxier) {
defer runtime.HandleCrash()
atomic.AddInt32(&proxier.numProxyLoops, 1)
@ -401,13 +399,13 @@ func (proxier *Proxier) addServiceOnPort(service proxy.ServicePortName, protocol
return si, nil
}
func (proxier *Proxier) mergeService(service *api.Service) sets.String {
func (proxier *Proxier) mergeService(service *v1.Service) sets.String {
if service == nil {
return nil
}
svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if !helper.IsServiceIPSet(service) {
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
return nil
}
existingPorts := sets.NewString()
@ -422,25 +420,25 @@ func (proxier *Proxier) mergeService(service *api.Service) sets.String {
continue
}
if exists {
glog.V(4).Infof("Something changed for service %q: stopping it", serviceName)
klog.V(4).Infof("Something changed for service %q: stopping it", serviceName)
if err := proxier.closePortal(serviceName, info); err != nil {
glog.Errorf("Failed to close portal for %q: %v", serviceName, err)
klog.Errorf("Failed to close portal for %q: %v", serviceName, err)
}
if err := proxier.stopProxy(serviceName, info); err != nil {
glog.Errorf("Failed to stop service %q: %v", serviceName, err)
klog.Errorf("Failed to stop service %q: %v", serviceName, err)
}
}
proxyPort, err := proxier.proxyPorts.AllocateNext()
if err != nil {
glog.Errorf("failed to allocate proxy port for service %q: %v", serviceName, err)
klog.Errorf("failed to allocate proxy port for service %q: %v", serviceName, err)
continue
}
serviceIP := net.ParseIP(service.Spec.ClusterIP)
glog.V(1).Infof("Adding new service %q at %s/%s", serviceName, net.JoinHostPort(serviceIP.String(), strconv.Itoa(int(servicePort.Port))), servicePort.Protocol)
klog.V(1).Infof("Adding new service %q at %s/%s", serviceName, net.JoinHostPort(serviceIP.String(), strconv.Itoa(int(servicePort.Port))), servicePort.Protocol)
info, err = proxier.addServiceOnPort(serviceName, servicePort.Protocol, proxyPort, proxier.udpIdleTimeout)
if err != nil {
glog.Errorf("Failed to start proxy for %q: %v", serviceName, err)
klog.Errorf("Failed to start proxy for %q: %v", serviceName, err)
continue
}
info.portal.ip = serviceIP
@ -451,14 +449,14 @@ func (proxier *Proxier) mergeService(service *api.Service) sets.String {
info.nodePort = int(servicePort.NodePort)
info.sessionAffinityType = service.Spec.SessionAffinity
// Kube-apiserver side guarantees SessionAffinityConfig won't be nil when session affinity type is ClientIP
if service.Spec.SessionAffinity == api.ServiceAffinityClientIP {
if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP {
info.stickyMaxAgeSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
}
glog.V(4).Infof("info: %#v", info)
klog.V(4).Infof("info: %#v", info)
if err := proxier.openPortal(serviceName, info); err != nil {
glog.Errorf("Failed to open portal for %q: %v", serviceName, err)
klog.Errorf("Failed to open portal for %q: %v", serviceName, err)
}
proxier.loadBalancer.NewService(serviceName, info.sessionAffinityType, info.stickyMaxAgeSeconds)
}
@ -466,13 +464,13 @@ func (proxier *Proxier) mergeService(service *api.Service) sets.String {
return existingPorts
}
func (proxier *Proxier) unmergeService(service *api.Service, existingPorts sets.String) {
func (proxier *Proxier) unmergeService(service *v1.Service, existingPorts sets.String) {
if service == nil {
return
}
svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if !helper.IsServiceIPSet(service) {
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
return
}
@ -486,49 +484,49 @@ func (proxier *Proxier) unmergeService(service *api.Service, existingPorts sets.
}
serviceName := proxy.ServicePortName{NamespacedName: svcName, Port: servicePort.Name}
glog.V(1).Infof("Stopping service %q", serviceName)
klog.V(1).Infof("Stopping service %q", serviceName)
info, exists := proxier.serviceMap[serviceName]
if !exists {
glog.Errorf("Service %q is being removed but doesn't exist", serviceName)
klog.Errorf("Service %q is being removed but doesn't exist", serviceName)
continue
}
if proxier.serviceMap[serviceName].protocol == api.ProtocolUDP {
if proxier.serviceMap[serviceName].protocol == v1.ProtocolUDP {
staleUDPServices.Insert(proxier.serviceMap[serviceName].portal.ip.String())
}
if err := proxier.closePortal(serviceName, info); err != nil {
glog.Errorf("Failed to close portal for %q: %v", serviceName, err)
klog.Errorf("Failed to close portal for %q: %v", serviceName, err)
}
if err := proxier.stopProxyInternal(serviceName, info); err != nil {
glog.Errorf("Failed to stop service %q: %v", serviceName, err)
klog.Errorf("Failed to stop service %q: %v", serviceName, err)
}
proxier.loadBalancer.DeleteService(serviceName)
}
for _, svcIP := range staleUDPServices.UnsortedList() {
if err := conntrack.ClearEntriesForIP(proxier.exec, svcIP, v1.ProtocolUDP); err != nil {
glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err)
klog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err)
}
}
}
func (proxier *Proxier) OnServiceAdd(service *api.Service) {
func (proxier *Proxier) OnServiceAdd(service *v1.Service) {
_ = proxier.mergeService(service)
}
func (proxier *Proxier) OnServiceUpdate(oldService, service *api.Service) {
func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) {
existingPorts := proxier.mergeService(service)
proxier.unmergeService(oldService, existingPorts)
}
func (proxier *Proxier) OnServiceDelete(service *api.Service) {
func (proxier *Proxier) OnServiceDelete(service *v1.Service) {
proxier.unmergeService(service, sets.NewString())
}
func (proxier *Proxier) OnServiceSynced() {
}
func sameConfig(info *ServiceInfo, service *api.Service, port *api.ServicePort) bool {
func sameConfig(info *ServiceInfo, service *v1.Service, port *v1.ServicePort) bool {
if info.protocol != port.Protocol || info.portal.port != int(port.Port) || info.nodePort != int(port.NodePort) {
return false
}
@ -587,7 +585,7 @@ func (proxier *Proxier) openPortal(service proxy.ServicePortName, info *ServiceI
return nil
}
func (proxier *Proxier) openOnePortal(portal portal, protocol api.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) error {
func (proxier *Proxier) openOnePortal(portal portal, protocol v1.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) error {
if local, err := utilproxy.IsLocalIP(portal.ip.String()); err != nil {
return fmt.Errorf("can't determine if IP %s is local, assuming not: %v", portal.ip, err)
} else if local {
@ -602,31 +600,31 @@ func (proxier *Proxier) openOnePortal(portal portal, protocol api.Protocol, prox
portalAddress := net.JoinHostPort(portal.ip.String(), strconv.Itoa(portal.port))
existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerPortalChain, args...)
if err != nil {
glog.Errorf("Failed to install iptables %s rule for service %q, args:%v", iptablesContainerPortalChain, name, args)
klog.Errorf("Failed to install iptables %s rule for service %q, args:%v", iptablesContainerPortalChain, name, args)
return err
}
if !existed {
glog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s", name, protocol, portalAddress)
klog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s", name, protocol, portalAddress)
}
if portal.isExternal {
args := proxier.iptablesContainerPortalArgs(portal.ip, false, true, portal.port, protocol, proxyIP, proxyPort, name)
existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerPortalChain, args...)
if err != nil {
glog.Errorf("Failed to install iptables %s rule that opens service %q for local traffic, args:%v", iptablesContainerPortalChain, name, args)
klog.Errorf("Failed to install iptables %s rule that opens service %q for local traffic, args:%v", iptablesContainerPortalChain, name, args)
return err
}
if !existed {
glog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s for local traffic", name, protocol, portalAddress)
klog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s for local traffic", name, protocol, portalAddress)
}
args = proxier.iptablesHostPortalArgs(portal.ip, true, portal.port, protocol, proxyIP, proxyPort, name)
existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostPortalChain, args...)
if err != nil {
glog.Errorf("Failed to install iptables %s rule for service %q for dst-local traffic", iptablesHostPortalChain, name)
klog.Errorf("Failed to install iptables %s rule for service %q for dst-local traffic", iptablesHostPortalChain, name)
return err
}
if !existed {
glog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s for dst-local traffic", name, protocol, portalAddress)
klog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s for dst-local traffic", name, protocol, portalAddress)
}
return nil
}
@ -635,18 +633,18 @@ func (proxier *Proxier) openOnePortal(portal portal, protocol api.Protocol, prox
args = proxier.iptablesHostPortalArgs(portal.ip, false, portal.port, protocol, proxyIP, proxyPort, name)
existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostPortalChain, args...)
if err != nil {
glog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostPortalChain, name)
klog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostPortalChain, name)
return err
}
if !existed {
glog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s", name, protocol, portalAddress)
klog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s", name, protocol, portalAddress)
}
return nil
}
// Marks a port as being owned by a particular service, or returns an error if it is already claimed.
// Idempotent: reclaiming with the same owner is not an error.
func (proxier *Proxier) claimNodePort(ip net.IP, port int, protocol api.Protocol, owner proxy.ServicePortName) error {
func (proxier *Proxier) claimNodePort(ip net.IP, port int, protocol v1.Protocol, owner proxy.ServicePortName) error {
proxier.portMapMutex.Lock()
defer proxier.portMapMutex.Unlock()
@ -667,7 +665,7 @@ func (proxier *Proxier) claimNodePort(ip net.IP, port int, protocol api.Protocol
return fmt.Errorf("can't open node port for %s: %v", key.String(), err)
}
proxier.portMap[key] = &portMapValue{owner: owner, socket: socket}
glog.V(2).Infof("Claimed local port %s", key.String())
klog.V(2).Infof("Claimed local port %s", key.String())
return nil
}
if existing.owner == owner {
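Note: the hunk cuts off mid-function; the contract the surrounding code implements is roughly the following (a hedged reconstruction, not the verbatim body):

key := portMapKey{ip: ip.String(), port: port, protocol: protocol}
if existing, found := proxier.portMap[key]; found {
	if existing.owner == owner {
		return nil // idempotent: a re-claim by the same owner succeeds
	}
	return fmt.Errorf("port conflict on %s: owned by %v, claimed by %v", key.String(), existing.owner, owner)
}
// otherwise open a socket, record {owner, socket} in portMap, and return nil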
@ -679,7 +677,7 @@ func (proxier *Proxier) claimNodePort(ip net.IP, port int, protocol api.Protocol
// Release a claim on a port. Returns an error if the owner does not match the claim.
// Tolerates release on an unclaimed port, to simplify cleanup of failed allocations.
func (proxier *Proxier) releaseNodePort(ip net.IP, port int, protocol api.Protocol, owner proxy.ServicePortName) error {
func (proxier *Proxier) releaseNodePort(ip net.IP, port int, protocol v1.Protocol, owner proxy.ServicePortName) error {
proxier.portMapMutex.Lock()
defer proxier.portMapMutex.Unlock()
@ -687,7 +685,7 @@ func (proxier *Proxier) releaseNodePort(ip net.IP, port int, protocol api.Protoc
existing, found := proxier.portMap[key]
if !found {
// We tolerate this; it happens when we are cleaning up a failed allocation
glog.Infof("Ignoring release on unowned port: %v", key)
klog.Infof("Ignoring release on unowned port: %v", key)
return nil
}
if existing.owner != owner {
@ -698,7 +696,7 @@ func (proxier *Proxier) releaseNodePort(ip net.IP, port int, protocol api.Protoc
return nil
}
func (proxier *Proxier) openNodePort(nodePort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) error {
func (proxier *Proxier) openNodePort(nodePort int, protocol v1.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) error {
// TODO: Do we want to allow containers to access public services? Probably yes.
// TODO: We could refactor this to be the same code as portal, but with IP == nil
@ -708,35 +706,35 @@ func (proxier *Proxier) openNodePort(nodePort int, protocol api.Protocol, proxyI
}
// Handle traffic from containers.
args := proxier.iptablesContainerNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
args := proxier.iptablesContainerPortalArgs(nil, false, false, nodePort, protocol, proxyIP, proxyPort, name)
existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerNodePortChain, args...)
if err != nil {
glog.Errorf("Failed to install iptables %s rule for service %q", iptablesContainerNodePortChain, name)
klog.Errorf("Failed to install iptables %s rule for service %q", iptablesContainerNodePortChain, name)
return err
}
if !existed {
glog.Infof("Opened iptables from-containers public port for service %q on %s port %d", name, protocol, nodePort)
klog.Infof("Opened iptables from-containers public port for service %q on %s port %d", name, protocol, nodePort)
}
// Handle traffic from the host.
args = proxier.iptablesHostNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostNodePortChain, args...)
if err != nil {
glog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostNodePortChain, name)
klog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostNodePortChain, name)
return err
}
if !existed {
glog.Infof("Opened iptables from-host public port for service %q on %s port %d", name, protocol, nodePort)
klog.Infof("Opened iptables from-host public port for service %q on %s port %d", name, protocol, nodePort)
}
args = proxier.iptablesNonLocalNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableFilter, iptablesNonLocalNodePortChain, args...)
if err != nil {
glog.Errorf("Failed to install iptables %s rule for service %q", iptablesNonLocalNodePortChain, name)
klog.Errorf("Failed to install iptables %s rule for service %q", iptablesNonLocalNodePortChain, name)
return err
}
if !existed {
glog.Infof("Opened iptables from-non-local public port for service %q on %s port %d", name, protocol, nodePort)
klog.Infof("Opened iptables from-non-local public port for service %q on %s port %d", name, protocol, nodePort)
}
return nil
@ -757,14 +755,14 @@ func (proxier *Proxier) closePortal(service proxy.ServicePortName, info *Service
el = append(el, proxier.closeNodePort(info.nodePort, info.protocol, proxier.listenIP, info.proxyPort, service)...)
}
if len(el) == 0 {
glog.V(3).Infof("Closed iptables portals for service %q", service)
klog.V(3).Infof("Closed iptables portals for service %q", service)
} else {
glog.Errorf("Some errors closing iptables portals for service %q", service)
klog.Errorf("Some errors closing iptables portals for service %q", service)
}
return utilerrors.NewAggregate(el)
}
func (proxier *Proxier) closeOnePortal(portal portal, protocol api.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) []error {
func (proxier *Proxier) closeOnePortal(portal portal, protocol v1.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) []error {
el := []error{}
if local, err := utilproxy.IsLocalIP(portal.ip.String()); err != nil {
@ -778,20 +776,20 @@ func (proxier *Proxier) closeOnePortal(portal portal, protocol api.Protocol, pro
// Handle traffic from containers.
args := proxier.iptablesContainerPortalArgs(portal.ip, portal.isExternal, false, portal.port, protocol, proxyIP, proxyPort, name)
if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerPortalChain, args...); err != nil {
glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name)
klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name)
el = append(el, err)
}
if portal.isExternal {
args := proxier.iptablesContainerPortalArgs(portal.ip, false, true, portal.port, protocol, proxyIP, proxyPort, name)
if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerPortalChain, args...); err != nil {
glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name)
klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name)
el = append(el, err)
}
args = proxier.iptablesHostPortalArgs(portal.ip, true, portal.port, protocol, proxyIP, proxyPort, name)
if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostPortalChain, args...); err != nil {
glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name)
klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name)
el = append(el, err)
}
return el
@ -800,34 +798,34 @@ func (proxier *Proxier) closeOnePortal(portal portal, protocol api.Protocol, pro
// Handle traffic from the host (portalIP is not external).
args = proxier.iptablesHostPortalArgs(portal.ip, false, portal.port, protocol, proxyIP, proxyPort, name)
if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostPortalChain, args...); err != nil {
glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name)
klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name)
el = append(el, err)
}
return el
}
func (proxier *Proxier) closeNodePort(nodePort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) []error {
func (proxier *Proxier) closeNodePort(nodePort int, protocol v1.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) []error {
el := []error{}
// Handle traffic from containers.
args := proxier.iptablesContainerNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
args := proxier.iptablesContainerPortalArgs(nil, false, false, nodePort, protocol, proxyIP, proxyPort, name)
if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerNodePortChain, args...); err != nil {
glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerNodePortChain, name)
klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerNodePortChain, name)
el = append(el, err)
}
// Handle traffic from the host.
args = proxier.iptablesHostNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostNodePortChain, args...); err != nil {
glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostNodePortChain, name)
klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostNodePortChain, name)
el = append(el, err)
}
// Handle traffic not local to the host
args = proxier.iptablesNonLocalNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
if err := proxier.iptables.DeleteRule(iptables.TableFilter, iptablesNonLocalNodePortChain, args...); err != nil {
glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesNonLocalNodePortChain, name)
klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesNonLocalNodePortChain, name)
el = append(el, err)
}
@ -936,7 +934,7 @@ func iptablesFlush(ipt iptables.Interface) error {
el = append(el, err)
}
if len(el) != 0 {
glog.Errorf("Some errors flushing old iptables portals: %v", el)
klog.Errorf("Some errors flushing old iptables portals: %v", el)
}
return utilerrors.NewAggregate(el)
}
@ -949,7 +947,7 @@ var zeroIPv6 = net.ParseIP("::")
var localhostIPv6 = net.ParseIP("::1")
// Build a slice of iptables args that are common to from-container and from-host portal rules.
func iptablesCommonPortalArgs(destIP net.IP, addPhysicalInterfaceMatch bool, addDstLocalMatch bool, destPort int, protocol api.Protocol, service proxy.ServicePortName) []string {
func iptablesCommonPortalArgs(destIP net.IP, addPhysicalInterfaceMatch bool, addDstLocalMatch bool, destPort int, protocol v1.Protocol, service proxy.ServicePortName) []string {
// This list needs to include all fields as they are eventually spit out
// by iptables-save. This is because some systems do not support the
// 'iptables -C' arg, and so fall back on parsing iptables-save output.
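Note: concretely, EnsureRule may have to find a rule by string-matching iptables-save output on systems without 'iptables -C', so every match must be spelled in the canonical form iptables-save prints. The arg list this function builds looks roughly like this (values invented for illustration):

args := []string{
	"-m", "comment", "--comment", "ns1/svc1:p", // service name, for save-output matching
	"-p", "tcp",
	"-d", "172.16.55.4/32", // explicit /32 mask, as iptables-save prints it
	"--dport", "80",
}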
@ -982,7 +980,7 @@ func iptablesCommonPortalArgs(destIP net.IP, addPhysicalInterfaceMatch bool, add
}
// Build a slice of iptables args for a from-container portal rule.
func (proxier *Proxier) iptablesContainerPortalArgs(destIP net.IP, addPhysicalInterfaceMatch bool, addDstLocalMatch bool, destPort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
func (proxier *Proxier) iptablesContainerPortalArgs(destIP net.IP, addPhysicalInterfaceMatch bool, addDstLocalMatch bool, destPort int, protocol v1.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
args := iptablesCommonPortalArgs(destIP, addPhysicalInterfaceMatch, addDstLocalMatch, destPort, protocol, service)
// This is tricky.
@ -1029,7 +1027,7 @@ func (proxier *Proxier) iptablesContainerPortalArgs(destIP net.IP, addPhysicalIn
}
// Build a slice of iptables args for a from-host portal rule.
func (proxier *Proxier) iptablesHostPortalArgs(destIP net.IP, addDstLocalMatch bool, destPort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
func (proxier *Proxier) iptablesHostPortalArgs(destIP net.IP, addDstLocalMatch bool, destPort int, protocol v1.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
args := iptablesCommonPortalArgs(destIP, false, addDstLocalMatch, destPort, protocol, service)
// This is tricky.
@ -1061,27 +1059,10 @@ func (proxier *Proxier) iptablesHostPortalArgs(destIP net.IP, addDstLocalMatch b
return args
}
// Build a slice of iptables args for a from-container public-port rule.
// See iptablesContainerPortalArgs
// TODO: Should we just reuse iptablesContainerPortalArgs?
func (proxier *Proxier) iptablesContainerNodePortArgs(nodePort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
args := iptablesCommonPortalArgs(nil, false, false, nodePort, protocol, service)
if proxyIP.Equal(zeroIPv4) || proxyIP.Equal(zeroIPv6) {
// TODO: Can we REDIRECT with IPv6?
args = append(args, "-j", "REDIRECT", "--to-ports", fmt.Sprintf("%d", proxyPort))
} else {
// TODO: Can we DNAT with IPv6?
args = append(args, "-j", "DNAT", "--to-destination", net.JoinHostPort(proxyIP.String(), strconv.Itoa(proxyPort)))
}
return args
}
// Build a slice of iptables args for a from-host public-port rule.
// See iptablesHostPortalArgs
// TODO: Should we just reuse iptablesHostPortalArgs?
func (proxier *Proxier) iptablesHostNodePortArgs(nodePort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
func (proxier *Proxier) iptablesHostNodePortArgs(nodePort int, protocol v1.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
args := iptablesCommonPortalArgs(nil, false, false, nodePort, protocol, service)
if proxyIP.Equal(zeroIPv4) || proxyIP.Equal(zeroIPv6) {
@ -1093,7 +1074,7 @@ func (proxier *Proxier) iptablesHostNodePortArgs(nodePort int, protocol api.Prot
}
// Build a slice of iptables args for a from-non-local public-port rule.
func (proxier *Proxier) iptablesNonLocalNodePortArgs(nodePort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
func (proxier *Proxier) iptablesNonLocalNodePortArgs(nodePort int, protocol v1.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
args := iptablesCommonPortalArgs(nil, false, false, proxyPort, protocol, service)
args = append(args, "-m", "state", "--state", "NEW", "-j", "ACCEPT")
return args
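Note: one genuine simplification rides along with the rename in this file: the dedicated from-container node-port builder deleted above was effectively the portal builder invoked with a nil destination IP, so its call sites in openNodePort and closeNodePort now read:

// nil destIP plus no interface/dst-local matches reproduces the old
// from-container node-port rule.
args := proxier.iptablesContainerPortalArgs(nil, false, false, nodePort, protocol, proxyIP, proxyPort, name)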


@ -29,10 +29,10 @@ import (
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
ipttest "k8s.io/kubernetes/pkg/util/iptables/testing"
"k8s.io/utils/exec"
@ -228,11 +228,11 @@ func waitForNumProxyClients(t *testing.T, s *ServiceInfo, want int, timeout time
func TestTCPProxy(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
@ -255,11 +255,11 @@ func TestTCPProxy(t *testing.T) {
func TestUDPProxy(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
@ -282,11 +282,11 @@ func TestUDPProxy(t *testing.T) {
func TestUDPProxyTimeout(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
@ -314,18 +314,18 @@ func TestMultiPortProxy(t *testing.T) {
lb := NewLoadBalancerRR()
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-p"}, Port: "p"}
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-q"}, Port: "q"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Protocol: "TCP", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Protocol: "TCP", Port: tcpServerPort}},
}},
})
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceQ.Name, Namespace: serviceQ.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "q", Protocol: "UDP", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "q", Protocol: "UDP", Port: udpServerPort}},
}},
})
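Note: these tests repeat the same single-address Endpoints literal throughout; a hypothetical helper (not present in the file) would collapse each to one call:

// makeTestEndpoints is invented for illustration; the real tests inline the literal.
func makeTestEndpoints(namespace, name, portName string, port int32, protocol v1.Protocol) *v1.Endpoints {
	return &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []v1.EndpointPort{{Name: portName, Port: port, Protocol: protocol}},
		}},
	}
}

// e.g. lb.OnEndpointsAdd(makeTestEndpoints("testnamespace", "echo-p", "p", tcpServerPort, "TCP"))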
@ -366,9 +366,9 @@ func TestMultiPortOnServiceAdd(t *testing.T) {
}
waitForNumProxyLoops(t, p, 0)
p.OnServiceAdd(&api.Service{
p.OnServiceAdd(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: 80,
Protocol: "TCP",
@ -413,11 +413,11 @@ func stopProxyByName(proxier *Proxier, service proxy.ServicePortName) error {
func TestTCPProxyStop(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
@ -457,11 +457,11 @@ func TestTCPProxyStop(t *testing.T) {
func TestUDPProxyStop(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
@ -495,11 +495,11 @@ func TestUDPProxyStop(t *testing.T) {
func TestTCPProxyUpdateDelete(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
@ -522,9 +522,9 @@ func TestTCPProxyUpdateDelete(t *testing.T) {
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
p.OnServiceDelete(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
@ -539,11 +539,11 @@ func TestTCPProxyUpdateDelete(t *testing.T) {
func TestUDPProxyUpdateDelete(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
@ -566,9 +566,9 @@ func TestUDPProxyUpdateDelete(t *testing.T) {
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
p.OnServiceDelete(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "UDP",
@ -583,11 +583,11 @@ func TestUDPProxyUpdateDelete(t *testing.T) {
func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
endpoint := &api.Endpoints{
endpoint := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
}
lb.OnEndpointsAdd(endpoint)
@ -611,9 +611,9 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
p.OnServiceDelete(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
@ -626,9 +626,9 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
// need to add the endpoint back because it was cleaned up during the service delete
lb.OnEndpointsAdd(endpoint)
p.OnServiceAdd(&api.Service{
p.OnServiceAdd(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
@ -645,11 +645,11 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
endpoint := &api.Endpoints{
endpoint := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
}
lb.OnEndpointsAdd(endpoint)
@ -673,9 +673,9 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
p.OnServiceDelete(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "UDP",
@ -688,9 +688,9 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
// need to add the endpoint back because it was cleaned up during the service delete
lb.OnEndpointsAdd(endpoint)
p.OnServiceAdd(&api.Service{
p.OnServiceAdd(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "UDP",
@ -707,11 +707,11 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
func TestTCPProxyUpdatePort(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
@ -730,9 +730,9 @@ func TestTCPProxyUpdatePort(t *testing.T) {
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
p.OnServiceAdd(&api.Service{
p.OnServiceAdd(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: 99,
Protocol: "TCP",
@ -755,11 +755,11 @@ func TestTCPProxyUpdatePort(t *testing.T) {
func TestUDPProxyUpdatePort(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
@ -777,9 +777,9 @@ func TestUDPProxyUpdatePort(t *testing.T) {
}
waitForNumProxyLoops(t, p, 1)
p.OnServiceAdd(&api.Service{
p.OnServiceAdd(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: 99,
Protocol: "UDP",
@ -800,11 +800,11 @@ func TestUDPProxyUpdatePort(t *testing.T) {
func TestProxyUpdatePublicIPs(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
@ -823,10 +823,10 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
p.OnServiceAdd(&api.Service{
p.OnServiceAdd(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.portal.port),
Protocol: "TCP",
@ -852,11 +852,11 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
func TestProxyUpdatePortal(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
endpoint := &api.Endpoints{
endpoint := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
}
lb.OnEndpointsAdd(endpoint)
@ -876,18 +876,18 @@ func TestProxyUpdatePortal(t *testing.T) {
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
waitForNumProxyLoops(t, p, 1)
svcv0 := &api.Service{
svcv0 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
}}},
}
svcv1 := &api.Service{
svcv1 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
@ -899,9 +899,9 @@ func TestProxyUpdatePortal(t *testing.T) {
t.Fatalf("service with empty ClusterIP should not be included in the proxy")
}
svcv2 := &api.Service{
svcv2 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "None", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "None", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",
@ -913,9 +913,9 @@ func TestProxyUpdatePortal(t *testing.T) {
t.Fatalf("service with 'None' as ClusterIP should not be included in the proxy")
}
svcv3 := &api.Service{
svcv3 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.proxyPort),
Protocol: "TCP",

View File

@ -25,9 +25,9 @@ import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/runtime"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/proxy"
)
@ -45,7 +45,7 @@ type ProxySocket interface {
ListenPort() int
}
func newProxySocket(protocol api.Protocol, ip net.IP, port int) (ProxySocket, error) {
func newProxySocket(protocol v1.Protocol, ip net.IP, port int) (ProxySocket, error) {
host := ""
if ip != nil {
host = ip.String()
@ -68,6 +68,8 @@ func newProxySocket(protocol api.Protocol, ip net.IP, port int) (ProxySocket, er
return nil, err
}
return &udpProxySocket{UDPConn: conn, port: port}, nil
case "SCTP":
return nil, fmt.Errorf("SCTP is not supported for user space proxy")
}
return nil, fmt.Errorf("unknown protocol %q", protocol)
}
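With this change an SCTP service fails fast with a dedicated error instead of falling through to the generic unknown-protocol message. A caller-side sketch (illustrative only; the userspace proxy serves TCP and UDP sockets):

	sock, err := newProxySocket(v1.ProtocolSCTP, net.ParseIP("10.0.0.1"), 0)
	// sock == nil; err reads "SCTP is not supported for user space proxy"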
@ -93,10 +95,10 @@ func TryConnectEndpoints(service proxy.ServicePortName, srcAddr net.Addr, protoc
for _, dialTimeout := range EndpointDialTimeouts {
endpoint, err := loadBalancer.NextEndpoint(service, srcAddr, sessionAffinityReset)
if err != nil {
glog.Errorf("Couldn't find an endpoint for %s: %v", service, err)
klog.Errorf("Couldn't find an endpoint for %s: %v", service, err)
return nil, err
}
glog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint)
klog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint)
// TODO: This could spin up a new goroutine to make the outbound connection,
// and keep accepting inbound traffic.
outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout)
@ -104,7 +106,7 @@ func TryConnectEndpoints(service proxy.ServicePortName, srcAddr net.Addr, protoc
if isTooManyFDsError(err) {
panic("Dial failed: " + err.Error())
}
glog.Errorf("Dial failed: %v", err)
klog.Errorf("Dial failed: %v", err)
sessionAffinityReset = true
continue
}
@ -133,13 +135,13 @@ func (tcp *tcpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *Serv
// Then the service port was just closed so the accept failure is to be expected.
return
}
glog.Errorf("Accept failed: %v", err)
klog.Errorf("Accept failed: %v", err)
continue
}
glog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
klog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
outConn, err := TryConnectEndpoints(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", loadBalancer)
if err != nil {
glog.Errorf("Failed to connect to balancer: %v", err)
klog.Errorf("Failed to connect to balancer: %v", err)
inConn.Close()
continue
}
@ -152,7 +154,7 @@ func (tcp *tcpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *Serv
func ProxyTCP(in, out *net.TCPConn) {
var wg sync.WaitGroup
wg.Add(2)
glog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
klog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
go copyBytes("from backend", in, out, &wg)
go copyBytes("to backend", out, in, &wg)
@ -161,14 +163,14 @@ func ProxyTCP(in, out *net.TCPConn) {
func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) {
defer wg.Done()
glog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
klog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
n, err := io.Copy(dest, src)
if err != nil {
if !isClosedError(err) {
glog.Errorf("I/O error: %v", err)
klog.Errorf("I/O error: %v", err)
}
}
glog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
klog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
dest.Close()
src.Close()
}
@ -213,11 +215,11 @@ func (udp *udpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *Serv
if err != nil {
if e, ok := err.(net.Error); ok {
if e.Temporary() {
glog.V(1).Infof("ReadFrom had a temporary failure: %v", err)
klog.V(1).Infof("ReadFrom had a temporary failure: %v", err)
continue
}
}
glog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err)
klog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err)
break
}
// If this is a client we know already, reuse the connection and goroutine.
@ -230,14 +232,14 @@ func (udp *udpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *Serv
_, err = svrConn.Write(buffer[0:n])
if err != nil {
if !logTimeout(err) {
glog.Errorf("Write failed: %v", err)
klog.Errorf("Write failed: %v", err)
// TODO: Maybe tear down the goroutine for this client/server pair?
}
continue
}
err = svrConn.SetDeadline(time.Now().Add(myInfo.Timeout))
if err != nil {
glog.Errorf("SetDeadline failed: %v", err)
klog.Errorf("SetDeadline failed: %v", err)
continue
}
}
@ -251,14 +253,14 @@ func (udp *udpProxySocket) getBackendConn(activeClients *ClientCache, cliAddr ne
if !found {
// TODO: This could spin up a new goroutine to make the outbound connection,
// and keep accepting inbound traffic.
glog.V(3).Infof("New UDP connection from %s", cliAddr)
klog.V(3).Infof("New UDP connection from %s", cliAddr)
var err error
svrConn, err = TryConnectEndpoints(service, cliAddr, "udp", loadBalancer)
if err != nil {
return nil, err
}
if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil {
glog.Errorf("SetDeadline failed: %v", err)
klog.Errorf("SetDeadline failed: %v", err)
return nil, err
}
activeClients.Clients[cliAddr.String()] = svrConn
@ -279,19 +281,19 @@ func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activ
n, err := svrConn.Read(buffer[0:])
if err != nil {
if !logTimeout(err) {
glog.Errorf("Read failed: %v", err)
klog.Errorf("Read failed: %v", err)
}
break
}
err = svrConn.SetDeadline(time.Now().Add(timeout))
if err != nil {
glog.Errorf("SetDeadline failed: %v", err)
klog.Errorf("SetDeadline failed: %v", err)
break
}
n, err = udp.WriteTo(buffer[0:n], cliAddr)
if err != nil {
if !logTimeout(err) {
glog.Errorf("WriteTo failed: %v", err)
klog.Errorf("WriteTo failed: %v", err)
}
break
}

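ProxyTCP and copyBytes above implement the classic bidirectional-copy pattern: two goroutines, each io.Copy-ing one direction, joined by a WaitGroup, with both ends closed on exit so the opposite copy unblocks. A minimal standalone sketch of the same idea, stripped of logging (illustrative, not the vendored implementation):

	package proxysketch

	import (
		"io"
		"net"
		"sync"
	)

	// proxyTCP shuttles bytes both ways between two established connections
	// and returns once both directions have finished.
	func proxyTCP(client, backend *net.TCPConn) {
		var wg sync.WaitGroup
		wg.Add(2)
		go copyOneWay(client, backend, &wg) // backend -> client
		go copyOneWay(backend, client, &wg) // client -> backend
		wg.Wait()
	}

	// copyOneWay copies until EOF or error, then closes both ends so the
	// copy running in the opposite direction unblocks as well.
	func copyOneWay(dst, src *net.TCPConn, wg *sync.WaitGroup) {
		defer wg.Done()
		io.Copy(dst, src)
		dst.Close()
		src.Close()
	}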
View File

@ -25,9 +25,9 @@ import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/util/slice"
)
@ -46,7 +46,7 @@ type affinityState struct {
}
type affinityPolicy struct {
affinityType api.ServiceAffinity
affinityType v1.ServiceAffinity
affinityMap map[string]*affinityState // map client IP -> affinity info
ttlSeconds int
}
@ -66,7 +66,7 @@ type balancerState struct {
affinity affinityPolicy
}
func newAffinityPolicy(affinityType api.ServiceAffinity, ttlSeconds int) *affinityPolicy {
func newAffinityPolicy(affinityType v1.ServiceAffinity, ttlSeconds int) *affinityPolicy {
return &affinityPolicy{
affinityType: affinityType,
affinityMap: make(map[string]*affinityState),
@ -81,8 +81,8 @@ func NewLoadBalancerRR() *LoadBalancerRR {
}
}
func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlSeconds int) error {
glog.V(4).Infof("LoadBalancerRR NewService %q", svcPort)
func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) error {
klog.V(4).Infof("LoadBalancerRR NewService %q", svcPort)
lb.lock.Lock()
defer lb.lock.Unlock()
lb.newServiceInternal(svcPort, affinityType, ttlSeconds)
@ -90,14 +90,14 @@ func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType
}
// This assumes that lb.lock is already held.
func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlSeconds int) *balancerState {
func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) *balancerState {
if ttlSeconds == 0 {
ttlSeconds = int(api.DefaultClientIPServiceAffinitySeconds) //default to 3 hours if not specified. Should 0 be unlimited instead????
ttlSeconds = int(v1.DefaultClientIPServiceAffinitySeconds) // default to 3 hours if not specified; should 0 mean unlimited instead?
}
if _, exists := lb.services[svcPort]; !exists {
lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlSeconds)}
glog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort)
klog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort)
} else if affinityType != "" {
lb.services[svcPort].affinity.affinityType = affinityType
}
@ -105,7 +105,7 @@ func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affi
}
func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
glog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort)
klog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort)
lb.lock.Lock()
defer lb.lock.Unlock()
delete(lb.services, svcPort)
@ -114,7 +114,7 @@ func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
// return true if this service is using some form of session affinity.
func isSessionAffinity(affinity *affinityPolicy) bool {
// Should never be empty string, but checking for it to be safe.
if affinity.affinityType == "" || affinity.affinityType == api.ServiceAffinityNone {
if affinity.affinityType == "" || affinity.affinityType == v1.ServiceAffinityNone {
return false
}
return true
@ -145,7 +145,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
if len(state.endpoints) == 0 {
return "", ErrMissingEndpoints
}
glog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints)
klog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints)
sessionAffinityEnabled := isSessionAffinity(&state.affinity)
@ -163,7 +163,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
// Affinity wins.
endpoint := sessionAffinity.endpoint
sessionAffinity.lastUsed = time.Now()
glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint)
klog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint)
return endpoint, nil
}
}
@ -182,7 +182,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
affinity.lastUsed = time.Now()
affinity.endpoint = endpoint
affinity.clientIP = ipaddr
glog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr])
klog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr])
}
return endpoint, nil
@ -214,7 +214,7 @@ func flattenValidEndpoints(endpoints []hostPortPair) []string {
func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) {
for _, affinity := range state.affinity.affinityMap {
if affinity.endpoint == endpoint {
glog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort)
klog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort)
delete(state.affinity.affinityMap, affinity.clientIP)
}
}
@ -237,7 +237,7 @@ func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEn
}
for mKey, mVal := range allEndpoints {
if mVal == 1 {
glog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort)
klog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort)
removeSessionAffinityByEndpoint(state, svcPort, mKey)
}
}
@ -245,7 +245,7 @@ func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEn
// buildPortsToEndpointsMap builds a map of portname -> all ip:ports for that
// portname. Explode Endpoints.Subsets[*] into this structure.
func buildPortsToEndpointsMap(endpoints *api.Endpoints) map[string][]hostPortPair {
func buildPortsToEndpointsMap(endpoints *v1.Endpoints) map[string][]hostPortPair {
portsToEndpoints := map[string][]hostPortPair{}
for i := range endpoints.Subsets {
ss := &endpoints.Subsets[i]
@ -261,7 +261,7 @@ func buildPortsToEndpointsMap(endpoints *api.Endpoints) map[string][]hostPortPai
return portsToEndpoints
}
func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *api.Endpoints) {
func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *v1.Endpoints) {
portsToEndpoints := buildPortsToEndpointsMap(endpoints)
lb.lock.Lock()
@ -273,13 +273,13 @@ func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *api.Endpoints) {
state, exists := lb.services[svcPort]
if !exists || state == nil || len(newEndpoints) > 0 {
glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
klog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
lb.updateAffinityMap(svcPort, newEndpoints)
// OnEndpointsAdd can be called without NewService being called externally.
// To be safe we will call it here. A new service will only be created
// if one does not already exist. The affinity will be updated
// later, once NewService is called.
state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
state = lb.newServiceInternal(svcPort, v1.ServiceAffinity(""), 0)
state.endpoints = slice.ShuffleStrings(newEndpoints)
// Reset the round-robin index.
@ -288,7 +288,7 @@ func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *api.Endpoints) {
}
}
func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) {
portsToEndpoints := buildPortsToEndpointsMap(endpoints)
oldPortsToEndpoints := buildPortsToEndpointsMap(oldEndpoints)
registeredEndpoints := make(map[proxy.ServicePortName]bool)
@ -307,13 +307,13 @@ func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoin
}
if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints) {
glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
klog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
lb.updateAffinityMap(svcPort, newEndpoints)
// OnEndpointsUpdate can be called without NewService being called externally.
// To be safe we will call it here. A new service will only be created
// if one does not already exist. The affinity will be updated
// later, once NewService is called.
state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
state = lb.newServiceInternal(svcPort, v1.ServiceAffinity(""), 0)
state.endpoints = slice.ShuffleStrings(newEndpoints)
// Reset the round-robin index.
@ -335,7 +335,7 @@ func (lb *LoadBalancerRR) resetService(svcPort proxy.ServicePortName) {
// If the service is still around, reset but don't delete.
if state, ok := lb.services[svcPort]; ok {
if len(state.endpoints) > 0 {
glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
klog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
state.endpoints = []string{}
}
state.index = 0
@ -343,7 +343,7 @@ func (lb *LoadBalancerRR) resetService(svcPort proxy.ServicePortName) {
}
}
func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *api.Endpoints) {
func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *v1.Endpoints) {
portsToEndpoints := buildPortsToEndpointsMap(endpoints)
lb.lock.Lock()
@ -379,7 +379,7 @@ func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortNa
}
for ip, affinity := range state.affinity.affinityMap {
if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
klog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
delete(state.affinity.affinityMap, ip)
}
}

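The selection logic in NextEndpoint reduces to a per-service round-robin cursor plus a client-IP affinity map that is consulted first. A compact standalone sketch of that core (illustrative; type and field names are invented here, and it omits the real code's locking, the affinity-enabled check, and the ttlSeconds expiry handled by CleanupStaleStickySessions):

	package rrsketch

	import "time"

	// state distills balancerState: a shuffled endpoint list, a round-robin
	// cursor, and a per-client affinity record (client IP -> endpoint).
	type state struct {
		endpoints []string
		index     int
		affinity  map[string]string
		lastUsed  map[string]time.Time
	}

	func newState(endpoints []string) *state {
		return &state{
			endpoints: endpoints,
			affinity:  make(map[string]string),
			lastUsed:  make(map[string]time.Time),
		}
	}

	// next returns the endpoint for clientIP: a sticky hit wins outright,
	// otherwise the cursor advances round-robin and the choice is recorded.
	func (s *state) next(clientIP string) (string, bool) {
		if len(s.endpoints) == 0 {
			return "", false
		}
		if ep, ok := s.affinity[clientIP]; ok {
			s.lastUsed[clientIP] = time.Now() // refresh the sticky entry
			return ep, true
		}
		ep := s.endpoints[s.index]
		s.index = (s.index + 1) % len(s.endpoints) // wrap the cursor
		s.affinity[clientIP] = ep
		s.lastUsed[clientIP] = time.Now()
		return ep, true
	}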
View File

@ -20,9 +20,9 @@ import (
"net"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
)
@ -104,11 +104,11 @@ func TestLoadBalanceWorksWithSingleEndpoint(t *testing.T) {
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints := &api.Endpoints{
endpoints := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
Ports: []api.EndpointPort{{Name: "p", Port: 40}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "endpoint1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 40}},
}},
}
loadBalancer.OnEndpointsAdd(endpoints)
@ -141,11 +141,11 @@ func TestLoadBalanceWorksWithMultipleEndpoints(t *testing.T) {
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints := &api.Endpoints{
endpoints := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}},
}},
}
loadBalancer.OnEndpointsAdd(endpoints)
@ -168,16 +168,16 @@ func TestLoadBalanceWorksWithMultipleEndpointsMultiplePorts(t *testing.T) {
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints := &api.Endpoints{
endpoints := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}},
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}},
Addresses: []v1.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
Ports: []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}},
Addresses: []v1.EndpointAddress{{IP: "endpoint3"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}},
},
},
}
@ -210,20 +210,20 @@ func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpointsv1 := &api.Endpoints{
endpointsv1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}},
Addresses: []v1.EndpointAddress{{IP: "endpoint1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint2"}},
Ports: []api.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}},
Addresses: []v1.EndpointAddress{{IP: "endpoint2"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
Ports: []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}},
Addresses: []v1.EndpointAddress{{IP: "endpoint3"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}},
},
},
}
@ -249,16 +249,16 @@ func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
// Then update the configuration with one fewer endpoints, make sure
// we start in the beginning again
endpointsv2 := &api.Endpoints{
endpointsv2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint4"}},
Ports: []api.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}},
Addresses: []v1.EndpointAddress{{IP: "endpoint4"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint5"}},
Ports: []api.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}},
Addresses: []v1.EndpointAddress{{IP: "endpoint5"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}},
},
},
}
@ -283,7 +283,7 @@ func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
// Clear endpoints
endpointsv3 := &api.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil}
endpointsv3 := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil}
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
endpoint, err = loadBalancer.NextEndpoint(serviceP, nil, false)
@ -300,21 +300,21 @@ func TestLoadBalanceWorksWithServiceRemoval(t *testing.T) {
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints1 := &api.Endpoints{
endpoints1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: fooServiceP.Name, Namespace: fooServiceP.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}},
Ports: []api.EndpointPort{{Name: "p", Port: 123}},
Addresses: []v1.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 123}},
},
},
}
endpoints2 := &api.Endpoints{
endpoints2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: barServiceP.Name, Namespace: barServiceP.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}},
Ports: []api.EndpointPort{{Name: "p", Port: 456}},
Addresses: []v1.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 456}},
},
},
}
@ -357,13 +357,13 @@ func TestStickyLoadBalanceWorksWithNewServiceCalledFirst(t *testing.T) {
}
// Call NewService() before OnEndpointsUpdate()
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints := &api.Endpoints{
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
endpoints := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
Subsets: []v1.EndpointSubset{
{Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
{Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
{Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, Ports: []v1.EndpointPort{{Port: 3}}},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
@ -413,15 +413,15 @@ func TestStickyLoadBalanceWorksWithNewServiceCalledSecond(t *testing.T) {
}
// Call OnEndpointsUpdate() before NewService()
endpoints := &api.Endpoints{
endpoints := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
Subsets: []v1.EndpointSubset{
{Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
{Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
@ -473,13 +473,13 @@ func TestStickyLoadBalanaceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
t.Errorf("Didn't fail with non-existent service")
}
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpointsv1 := &api.Endpoints{
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
endpointsv1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
},
},
}
@ -494,12 +494,12 @@ func TestStickyLoadBalanaceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
client3Endpoint := shuffledEndpoints[2]
endpointsv2 := &api.Endpoints{
endpointsv2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}},
},
},
}
@ -516,12 +516,12 @@ func TestStickyLoadBalanaceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
endpointsv3 := &api.Endpoints{
endpointsv3 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}},
},
},
}
@ -546,13 +546,13 @@ func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
t.Errorf("Didn't fail with non-existent service")
}
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpointsv1 := &api.Endpoints{
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
endpointsv1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
},
},
}
@ -567,12 +567,12 @@ func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
// Then update the configuration with one fewer endpoints, make sure
// we start in the beginning again
endpointsv2 := &api.Endpoints{
endpointsv2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 4}, {Port: 5}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 4}, {Port: 5}},
},
},
}
@ -586,7 +586,7 @@ func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
// Clear endpoints
endpointsv3 := &api.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil}
endpointsv3 := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil}
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
endpoint, err = loadBalancer.NextEndpoint(service, nil, false)
@ -605,24 +605,24 @@ func TestStickyLoadBalanceWorksWithServiceRemoval(t *testing.T) {
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
loadBalancer.NewService(fooService, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints1 := &api.Endpoints{
loadBalancer.NewService(fooService, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
endpoints1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: fooService.Name, Namespace: fooService.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
},
},
}
barService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: ""}
loadBalancer.NewService(barService, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints2 := &api.Endpoints{
loadBalancer.NewService(barService, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
endpoints2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: barService.Name, Namespace: barService.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 4}, {Port: 5}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 4}, {Port: 5}},
},
},
}
@ -674,13 +674,13 @@ func TestStickyLoadBalanceWorksWithEndpointFails(t *testing.T) {
}
// Call NewService() before OnEndpointsUpdate()
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints := &api.Endpoints{
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
endpoints := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
Subsets: []v1.EndpointSubset{
{Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
{Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
{Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, Ports: []v1.EndpointPort{{Port: 3}}},
},
}
loadBalancer.OnEndpointsAdd(endpoints)

View File

@ -11,14 +11,13 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/proxy/util",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/helper:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/util/net:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -31,11 +30,11 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy/util/testing:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)

View File

@ -21,7 +21,7 @@ import (
"net"
"strconv"
"github.com/golang/glog"
"k8s.io/klog"
)
// IPPart returns just the IP part of an IP or IP:port or endpoint string. If the IP
@ -35,14 +35,14 @@ func IPPart(s string) string {
// Must be IP:port
host, _, err := net.SplitHostPort(s)
if err != nil {
glog.Errorf("Error parsing '%s': %v", s, err)
klog.Errorf("Error parsing '%s': %v", s, err)
return ""
}
// Check if host string is a valid IP address
if ip := net.ParseIP(host); ip != nil {
return ip.String()
} else {
glog.Errorf("invalid IP part '%s'", host)
klog.Errorf("invalid IP part '%s'", host)
}
return ""
}
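A few illustrative expectations for IPPart, derived from its doc comment and the body above (the failure cases also log an error):

	IPPart("1.2.3.4")              // "1.2.3.4" (bare IPs pass through)
	IPPart("1.2.3.4:80")           // "1.2.3.4"
	IPPart("[2001:db8::2:2]:9999") // "2001:db8::2:2" (brackets stripped by SplitHostPort)
	IPPart("nonsense:80")          // "" (host is not a valid IP)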
@ -52,12 +52,12 @@ func PortPart(s string) (int, error) {
// Must be IP:port
_, port, err := net.SplitHostPort(s)
if err != nil {
glog.Errorf("Error parsing '%s': %v", s, err)
klog.Errorf("Error parsing '%s': %v", s, err)
return -1, err
}
portNumber, err := strconv.Atoi(port)
if err != nil {
glog.Errorf("Error parsing '%s': %v", port, err)
klog.Errorf("Error parsing '%s': %v", port, err)
return -1, err
}
return portNumber, nil

View File

@ -50,6 +50,58 @@ func TestIPPart(t *testing.T) {
}
}
func TestPortPart(t *testing.T) {
tests := []struct {
name string
endpoint string
want int
wantErr bool
}{
{
"no error parsing from ipv4-ip:port",
"1.2.3.4:1024",
1024,
false,
},
{
"no error parsing from ipv6-ip:port",
"[2001:db8::2:2]:9999",
9999,
false,
},
{
"error: missing port",
"1.2.3.4",
-1,
true,
},
{
"error: invalid port '1-2'",
"1.2.3.4:1-2",
-1,
true,
},
{
"error: invalid port 'port'",
"100.200.3.4:port",
-1,
true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := PortPart(tt.endpoint)
if (err != nil) != tt.wantErr {
t.Errorf("PortPart() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("PortPart() = %v, want %v", got, tt.want)
}
})
}
}
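For reference, a function satisfying the table above needs only net.SplitHostPort plus strconv.Atoi. A minimal sketch (illustrative; the real PortPart also logs each failure):

	package utilsketch

	import (
		"net"
		"strconv"
	)

	// portPart extracts the numeric port from an "ip:port" or "[ipv6]:port"
	// endpoint string, returning -1 and an error if either step fails.
	func portPart(s string) (int, error) {
		_, port, err := net.SplitHostPort(s) // rejects "1.2.3.4" (no port)
		if err != nil {
			return -1, err
		}
		n, err := strconv.Atoi(port) // rejects "port" and "1-2"
		if err != nil {
			return -1, err
		}
		return n, nil
	}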
func TestToCIDR(t *testing.T) {
testCases := []struct {
ip string

View File

@ -21,7 +21,7 @@ import (
"net"
"strconv"
"github.com/golang/glog"
"k8s.io/klog"
)
// LocalPort describes a port on specific IP address and protocol
@ -60,7 +60,7 @@ func RevertPorts(replacementPortsMap, originalPortsMap map[LocalPort]Closeable)
for k, v := range replacementPortsMap {
// Only close newly opened local ports - leave ones that were open before this update
if originalPortsMap[k] == nil {
glog.V(2).Infof("Closing local port %s", k.String())
klog.V(2).Infof("Closing local port %s", k.String())
v.Close()
}
}

View File

@ -38,6 +38,8 @@ func TestLocalPortString(t *testing.T) {
{"IPv4 UDP", "1.2.3.4", 9999, "udp", "\"IPv4 UDP\" (1.2.3.4:9999/udp)"},
{"IPv4 TCP", "5.6.7.8", 1053, "tcp", "\"IPv4 TCP\" (5.6.7.8:1053/tcp)"},
{"IPv6 TCP", "2001:db8::1", 80, "tcp", "\"IPv6 TCP\" ([2001:db8::1]:80/tcp)"},
{"IPv4 SCTP", "9.10.11.12", 7777, "sctp", "\"IPv4 SCTP\" (9.10.11.12:7777/sctp)"},
{"IPv6 SCTP", "2001:db8::2", 80, "sctp", "\"IPv6 SCTP\" ([2001:db8::2]:80/sctp)"},
}
for _, tc := range testCases {

View File

@ -24,11 +24,10 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
utilnet "k8s.io/kubernetes/pkg/util/net"
"github.com/golang/glog"
"k8s.io/klog"
)
const (
@ -60,15 +59,15 @@ func IsLocalIP(ip string) (bool, error) {
return false, nil
}
func ShouldSkipService(svcName types.NamespacedName, service *api.Service) bool {
func ShouldSkipService(svcName types.NamespacedName, service *v1.Service) bool {
// if ClusterIP is "None" or empty, skip proxying
if !helper.IsServiceIPSet(service) {
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
return true
}
// Even if ClusterIP is set, ServiceTypeExternalName services don't get proxied
if service.Spec.Type == api.ServiceTypeExternalName {
glog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName)
if service.Spec.Type == v1.ServiceTypeExternalName {
klog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName)
return true
}
return false
@ -135,7 +134,7 @@ func GetNodeAddresses(cidrs []string, nw NetworkInterfacer) (sets.String, error)
// LogAndEmitIncorrectIPVersionEvent logs and emits incorrect IP version event.
func LogAndEmitIncorrectIPVersionEvent(recorder record.EventRecorder, fieldName, fieldValue, svcNamespace, svcName string, svcUID types.UID) {
errMsg := fmt.Sprintf("%s in %s has incorrect IP version", fieldValue, fieldName)
glog.Errorf("%s (service %s/%s).", errMsg, svcNamespace, svcName)
klog.Errorf("%s (service %s/%s).", errMsg, svcNamespace, svcName)
if recorder != nil {
recorder.Eventf(
&v1.ObjectReference{

View File

@ -20,25 +20,25 @@ import (
"net"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
api "k8s.io/kubernetes/pkg/apis/core"
fake "k8s.io/kubernetes/pkg/proxy/util/testing"
)
func TestShouldSkipService(t *testing.T) {
testCases := []struct {
service *api.Service
service *v1.Service
svcName types.NamespacedName
shouldSkip bool
}{
{
// Cluster IP is None
service: &api.Service{
service: &v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
Spec: api.ServiceSpec{
ClusterIP: api.ClusterIPNone,
Spec: v1.ServiceSpec{
ClusterIP: v1.ClusterIPNone,
},
},
svcName: types.NamespacedName{Namespace: "foo", Name: "bar"},
@ -46,9 +46,9 @@ func TestShouldSkipService(t *testing.T) {
},
{
// Cluster IP is empty
service: &api.Service{
service: &v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
Spec: api.ServiceSpec{
Spec: v1.ServiceSpec{
ClusterIP: "",
},
},
@ -57,11 +57,11 @@ func TestShouldSkipService(t *testing.T) {
},
{
// ExternalName type service
service: &api.Service{
service: &v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
Spec: api.ServiceSpec{
Spec: v1.ServiceSpec{
ClusterIP: "1.2.3.4",
Type: api.ServiceTypeExternalName,
Type: v1.ServiceTypeExternalName,
},
},
svcName: types.NamespacedName{Namespace: "foo", Name: "bar"},
@ -69,11 +69,11 @@ func TestShouldSkipService(t *testing.T) {
},
{
// ClusterIP type service with ClusterIP set
service: &api.Service{
service: &v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
Spec: api.ServiceSpec{
Spec: v1.ServiceSpec{
ClusterIP: "1.2.3.4",
Type: api.ServiceTypeClusterIP,
Type: v1.ServiceTypeClusterIP,
},
},
svcName: types.NamespacedName{Namespace: "foo", Name: "bar"},
@ -81,11 +81,11 @@ func TestShouldSkipService(t *testing.T) {
},
{
// NodePort type service with ClusterIP set
service: &api.Service{
service: &v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
Spec: api.ServiceSpec{
Spec: v1.ServiceSpec{
ClusterIP: "1.2.3.4",
Type: api.ServiceTypeNodePort,
Type: v1.ServiceTypeNodePort,
},
},
svcName: types.NamespacedName{Namespace: "foo", Name: "bar"},
@ -93,11 +93,11 @@ func TestShouldSkipService(t *testing.T) {
},
{
// LoadBalancer type service with ClusterIP set
service: &api.Service{
service: &v1.Service{
ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
Spec: api.ServiceSpec{
Spec: v1.ServiceSpec{
ClusterIP: "1.2.3.4",
Type: api.ServiceTypeLoadBalancer,
Type: v1.ServiceTypeLoadBalancer,
},
},
svcName: types.NamespacedName{Namespace: "foo", Name: "bar"},

View File

@ -4,31 +4,27 @@ go_library(
name = "go_default_library",
srcs = [
"metrics.go",
] + select({
"@io_bazel_rules_go//go/platform:windows": [
"proxier.go",
],
"//conditions:default": [],
}),
"proxier.go",
],
importpath = "k8s.io/kubernetes/pkg/proxy/winkernel",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:windows": [
"//pkg/api/service:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/helper:go_default_library",
"//pkg/api/v1/service:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/healthcheck:go_default_library",
"//pkg/util/async:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//vendor/github.com/Microsoft/hcsshim:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
"//conditions:default": [],
}),

View File

@ -1,3 +1,8 @@
approvers:
- dineshgovindasamy
- madhanrm
- feiskyer
reviewers:
- dineshgovindasamy
- madhanrm
- feiskyer

View File

@ -30,15 +30,15 @@ import (
"github.com/Microsoft/hcsshim"
"github.com/davecgh/go-spew/spew"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
apiservice "k8s.io/kubernetes/pkg/api/service"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
apiservice "k8s.io/kubernetes/pkg/api/v1/service"
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/proxy/healthcheck"
"k8s.io/kubernetes/pkg/util/async"
@ -86,11 +86,11 @@ type loadBalancerIngressInfo struct {
type serviceInfo struct {
clusterIP net.IP
port int
protocol api.Protocol
protocol v1.Protocol
nodePort int
targetPort int
loadBalancerStatus api.LoadBalancerStatus
sessionAffinityType api.ServiceAffinity
loadBalancerStatus v1.LoadBalancerStatus
sessionAffinityType v1.ServiceAffinity
stickyMaxAgeSeconds int
externalIPs []*externalIPInfo
loadBalancerIngressIPs []*loadBalancerIngressInfo
@ -107,14 +107,14 @@ type hnsNetworkInfo struct {
id string
}
func Log(v interface{}, message string, level glog.Level) {
glog.V(level).Infof("%s, %s", message, spew.Sdump(v))
func Log(v interface{}, message string, level klog.Level) {
klog.V(level).Infof("%s, %s", message, spew.Sdump(v))
}
func LogJson(v interface{}, message string, level glog.Level) {
func LogJson(v interface{}, message string, level klog.Level) {
jsonString, err := json.Marshal(v)
if err == nil {
glog.V(level).Infof("%s, %s", message, string(jsonString))
klog.V(level).Infof("%s, %s", message, string(jsonString))
}
}
@ -128,12 +128,23 @@ type endpointsInfo struct {
refCount uint16
}
//Uses mac prefix and IPv4 address to return a mac address
//This ensures mac addresses are unique for proper load balancing
//Does not support IPv6 and returns a dummy mac
func conjureMac(macPrefix string, ip net.IP) string {
if ip4 := ip.To4(); ip4 != nil {
a, b, c, d := ip4[0], ip4[1], ip4[2], ip4[3]
return fmt.Sprintf("%v-%02x-%02x-%02x-%02x", macPrefix, a, b, c, d)
}
return "02-11-22-33-44-55"
}
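conjureMac makes each remote endpoint's MAC a direct function of its IPv4 address instead of the previously shared hard-coded value, which HNS load balancing needs to tell endpoints apart. Illustrative outputs, derived from the body above:

	conjureMac("02-11", net.ParseIP("10.244.1.5"))  // "02-11-0a-f4-01-05"
	conjureMac("02-11", net.ParseIP("2001:db8::1")) // "02-11-22-33-44-55" (IPv6 falls back to the dummy MAC)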
func newEndpointInfo(ip string, port uint16, isLocal bool) *endpointsInfo {
info := &endpointsInfo{
ip: ip,
port: port,
isLocal: isLocal,
macAddress: "00:11:22:33:44:55", // Hardcoding to some Random Mac
macAddress: conjureMac("02-11", net.ParseIP(ip)),
refCount: 0,
hnsID: "",
}
@ -148,7 +159,7 @@ func (ep *endpointsInfo) Cleanup() {
// Never delete a Local Endpoint. Local Endpoints are already created by other entities.
// Remove only remote endpoints created by this service
if ep.refCount <= 0 && !ep.isLocal {
glog.V(4).Infof("Removing endpoints for %v, since no one is referencing it", ep)
klog.V(4).Infof("Removing endpoints for %v, since no one is referencing it", ep)
deleteHnsEndpoint(ep.hnsID)
ep.hnsID = ""
}
@ -156,7 +167,7 @@ func (ep *endpointsInfo) Cleanup() {
}
// returns a new serviceInfo struct
func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, service *api.Service) *serviceInfo {
func newServiceInfo(svcPortName proxy.ServicePortName, port *v1.ServicePort, service *v1.Service) *serviceInfo {
onlyNodeLocalEndpoints := false
if apiservice.RequestsOnlyLocalTraffic(service) {
onlyNodeLocalEndpoints = true
@ -164,15 +175,17 @@ func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, se
// set default session sticky max age 180min=10800s
stickyMaxAgeSeconds := 10800
if service.Spec.SessionAffinity == api.ServiceAffinityClientIP {
if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP {
// Kube-apiserver side guarantees SessionAffinityConfig won't be nil when session affinity type is ClientIP
stickyMaxAgeSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
}
info := &serviceInfo{
clusterIP: net.ParseIP(service.Spec.ClusterIP),
port: int(port.Port),
protocol: port.Protocol,
nodePort: int(port.NodePort),
clusterIP: net.ParseIP(service.Spec.ClusterIP),
port: int(port.Port),
protocol: port.Protocol,
nodePort: int(port.NodePort),
// targetPort is zero if it is specified as a name in port.TargetPort.
// Its real value would be got later from endpoints.
targetPort: port.TargetPort.IntValue(),
// Deep-copy in case the service instance changes
loadBalancerStatus: *service.Status.LoadBalancer.DeepCopy(),
@ -193,7 +206,7 @@ func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, se
if apiservice.NeedsHealthCheck(service) {
p := service.Spec.HealthCheckNodePort
if p == 0 {
glog.Errorf("Service %q has no healthcheck nodeport", svcPortName.NamespacedName.String())
klog.Errorf("Service %q has no healthcheck nodeport", svcPortName.NamespacedName.String())
} else {
info.healthCheckNodePort = int(p)
}
@ -243,7 +256,7 @@ func newEndpointsChangeMap(hostname string) endpointsChangeMap {
}
}
func (ecm *endpointsChangeMap) update(namespacedName *types.NamespacedName, previous, current *api.Endpoints) bool {
func (ecm *endpointsChangeMap) update(namespacedName *types.NamespacedName, previous, current *v1.Endpoints) bool {
ecm.lock.Lock()
defer ecm.lock.Unlock()
@ -266,7 +279,7 @@ func newServiceChangeMap() serviceChangeMap {
}
}
func (scm *serviceChangeMap) update(namespacedName *types.NamespacedName, previous, current *api.Service) bool {
func (scm *serviceChangeMap) update(namespacedName *types.NamespacedName, previous, current *v1.Service) bool {
scm.lock.Lock()
defer scm.lock.Unlock()
@ -290,9 +303,9 @@ func (sm *proxyServiceMap) merge(other proxyServiceMap, curEndpoints proxyEndpoi
existingPorts.Insert(svcPortName.Port)
svcInfo, exists := (*sm)[svcPortName]
if !exists {
glog.V(1).Infof("Adding new service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
klog.V(1).Infof("Adding new service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
} else {
glog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
klog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
svcInfo.cleanupAllPolicies(curEndpoints[svcPortName])
delete(*sm, svcPortName)
}
@ -308,14 +321,14 @@ func (sm *proxyServiceMap) unmerge(other proxyServiceMap, existingPorts, staleSe
}
info, exists := (*sm)[svcPortName]
if exists {
glog.V(1).Infof("Removing service port %q", svcPortName)
if info.protocol == api.ProtocolUDP {
klog.V(1).Infof("Removing service port %q", svcPortName)
if info.protocol == v1.ProtocolUDP {
staleServices.Insert(info.clusterIP.String())
}
info.cleanupAllPolicies(curEndpoints[svcPortName])
delete(*sm, svcPortName)
} else {
glog.Errorf("Service port %q removed, but doesn't exists", svcPortName)
klog.Errorf("Service port %q removed, but doesn't exists", svcPortName)
}
}
}
@ -327,13 +340,13 @@ func (em proxyEndpointsMap) merge(other proxyEndpointsMap, curServices proxyServ
if exists {
//
info, exists := curServices[svcPortName]
glog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
klog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
if exists {
glog.V(2).Infof("Endpoints are modified. Service [%v] is stale", svcPortName)
klog.V(2).Infof("Endpoints are modified. Service [%v] is stale", svcPortName)
info.cleanupAllPolicies(epInfos)
} else {
// If no service exists, just cleanup the remote endpoints
glog.V(2).Infof("Endpoints are orphaned. Cleaning up")
klog.V(2).Infof("Endpoints are orphaned. Cleaning up")
// Cleanup Endpoints references
for _, ep := range epInfos {
ep.Cleanup()
@ -352,11 +365,11 @@ func (em proxyEndpointsMap) unmerge(other proxyEndpointsMap, curServices proxySe
for svcPortName := range other {
info, exists := curServices[svcPortName]
if exists {
glog.V(2).Infof("Service [%v] is stale", info)
klog.V(2).Infof("Service [%v] is stale", info)
info.cleanupAllPolicies(em[svcPortName])
} else {
// If no service exists, just cleanup the remote endpoints
glog.V(2).Infof("Endpoints are orphaned. Cleaning up")
klog.V(2).Infof("Endpoints are orphaned. Cleaning up")
// Cleanup Endpoints references
epInfos, exists := em[svcPortName]
if exists {
@ -421,13 +434,16 @@ func (lp *localPort) String() string {
return fmt.Sprintf("%q (%s:%d/%s)", lp.desc, lp.ip, lp.port, lp.protocol)
}
func Enum(p api.Protocol) uint16 {
if p == api.ProtocolTCP {
func Enum(p v1.Protocol) uint16 {
if p == v1.ProtocolTCP {
return 6
}
if p == api.ProtocolUDP {
if p == v1.ProtocolUDP {
return 17
}
if p == v1.ProtocolSCTP {
return 132
}
return 0
}
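The values Enum returns are the IANA-assigned IP protocol numbers, which is what HNS policy data expects; SCTP (132) is the newly handled case. Illustrative usage:

	Enum(v1.ProtocolTCP)  // 6
	Enum(v1.ProtocolUDP)  // 17
	Enum(v1.ProtocolSCTP) // 132
	Enum(v1.Protocol("")) // 0 (unknown)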
@ -454,12 +470,12 @@ func NewProxier(
masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue)
if nodeIP == nil {
glog.Warningf("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP")
klog.Warningf("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP")
nodeIP = net.ParseIP("127.0.0.1")
}
if len(clusterCIDR) == 0 {
glog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic")
klog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic")
}
healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps
@ -471,11 +487,11 @@ func NewProxier(
}
hnsNetwork, err := getHnsNetworkInfo(hnsNetworkName)
if err != nil {
glog.Fatalf("Unable to find Hns Network specified by %s. Please check environment variable KUBE_NETWORK", hnsNetworkName)
klog.Fatalf("Unable to find Hns Network specified by %s. Please check environment variable KUBE_NETWORK", hnsNetworkName)
return nil, err
}
glog.V(1).Infof("Hns Network loaded with info = %v", hnsNetwork)
klog.V(1).Infof("Hns Network loaded with info = %v", hnsNetwork)
proxier := &Proxier{
portsMap: make(map[localPort]closeable),
@ -495,7 +511,7 @@ func NewProxier(
}
burstSyncs := 2
glog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs)
klog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs)
proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs)
return proxier, nil
@ -514,15 +530,14 @@ func CleanupLeftovers() (encounteredError bool) {
func (svcInfo *serviceInfo) cleanupAllPolicies(endpoints []*endpointsInfo) {
Log(svcInfo, "Service Cleanup", 3)
if svcInfo.policyApplied {
svcInfo.deleteAllHnsLoadBalancerPolicy()
// Cleanup Endpoints references
for _, ep := range endpoints {
ep.Cleanup()
}
svcInfo.policyApplied = false
// Skip the svcInfo.policyApplied check to remove all the policies
svcInfo.deleteAllHnsLoadBalancerPolicy()
// Cleanup Endpoints references
for _, ep := range endpoints {
ep.Cleanup()
}
svcInfo.policyApplied = false
}
func (svcInfo *serviceInfo) deleteAllHnsLoadBalancerPolicy() {
@ -553,7 +568,7 @@ func deleteAllHnsLoadBalancerPolicy() {
LogJson(plist, "Remove Policy", 3)
_, err = plist.Delete()
if err != nil {
glog.Errorf("%v", err)
klog.Errorf("%v", err)
}
}
@ -614,36 +629,36 @@ func deleteHnsLoadBalancerPolicy(hnsID string) {
// Cleanup HNS policies
hnsloadBalancer, err := hcsshim.GetPolicyListByID(hnsID)
if err != nil {
glog.Errorf("%v", err)
klog.Errorf("%v", err)
return
}
LogJson(hnsloadBalancer, "Removing Policy", 2)
_, err = hnsloadBalancer.Delete()
if err != nil {
glog.Errorf("%v", err)
klog.Errorf("%v", err)
}
}
func deleteHnsEndpoint(hnsID string) {
hnsendpoint, err := hcsshim.GetHNSEndpointByID(hnsID)
if err != nil {
glog.Errorf("%v", err)
klog.Errorf("%v", err)
return
}
_, err = hnsendpoint.Delete()
if err != nil {
glog.Errorf("%v", err)
klog.Errorf("%v", err)
}
glog.V(3).Infof("Remote endpoint resource deleted id %s", hnsID)
klog.V(3).Infof("Remote endpoint resource deleted id %s", hnsID)
}
func getHnsNetworkInfo(hnsNetworkName string) (*hnsNetworkInfo, error) {
hnsnetwork, err := hcsshim.GetHNSNetworkByName(hnsNetworkName)
if err != nil {
glog.Errorf("%v", err)
klog.Errorf("%v", err)
return nil, err
}
@ -656,7 +671,7 @@ func getHnsNetworkInfo(hnsNetworkName string) (*hnsNetworkInfo, error) {
func getHnsEndpointByIpAddress(ip net.IP, networkName string) (*hcsshim.HNSEndpoint, error) {
hnsnetwork, err := hcsshim.GetHNSNetworkByName(networkName)
if err != nil {
glog.Errorf("%v", err)
klog.Errorf("%v", err)
return nil, err
}
@ -697,21 +712,21 @@ func (proxier *Proxier) isInitialized() bool {
return atomic.LoadInt32(&proxier.initialized) > 0
}
func (proxier *Proxier) OnServiceAdd(service *api.Service) {
func (proxier *Proxier) OnServiceAdd(service *v1.Service) {
namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if proxier.serviceChanges.update(&namespacedName, nil, service) && proxier.isInitialized() {
proxier.syncRunner.Run()
}
}
func (proxier *Proxier) OnServiceUpdate(oldService, service *api.Service) {
func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) {
namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if proxier.serviceChanges.update(&namespacedName, oldService, service) && proxier.isInitialized() {
proxier.syncRunner.Run()
}
}
func (proxier *Proxier) OnServiceDelete(service *api.Service) {
func (proxier *Proxier) OnServiceDelete(service *v1.Service) {
namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if proxier.serviceChanges.update(&namespacedName, service, nil) && proxier.isInitialized() {
proxier.syncRunner.Run()
@ -728,15 +743,15 @@ func (proxier *Proxier) OnServiceSynced() {
proxier.syncProxyRules()
}
func shouldSkipService(svcName types.NamespacedName, service *api.Service) bool {
func shouldSkipService(svcName types.NamespacedName, service *v1.Service) bool {
// if ClusterIP is "None" or empty, skip proxying
if !helper.IsServiceIPSet(service) {
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
return true
}
// Even if ClusterIP is set, ServiceTypeExternalName services don't get proxied
if service.Spec.Type == api.ServiceTypeExternalName {
glog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName)
if service.Spec.Type == v1.ServiceTypeExternalName {
klog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName)
return true
}
return false
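
shouldSkipService encodes the two cases the proxier must ignore: headless services (empty or "None" ClusterIP) and ExternalName services, which are answered by DNS rather than proxied. A minimal sketch of the same checks, with the IsServiceIPSet helper inlined as an assumption:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

// skip mirrors shouldSkipService above; IsServiceIPSet is inlined.
func skip(svc *v1.Service) bool {
	if svc.Spec.ClusterIP == v1.ClusterIPNone || svc.Spec.ClusterIP == "" {
		return true // headless: no virtual IP to program
	}
	return svc.Spec.Type == v1.ServiceTypeExternalName // resolved by DNS instead
}

func main() {
	headless := &v1.Service{Spec: v1.ServiceSpec{ClusterIP: v1.ClusterIPNone}}
	fmt.Println(skip(headless)) // true
}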
@ -772,21 +787,21 @@ func (proxier *Proxier) updateServiceMap() (result updateServiceMapResult) {
return result
}
func (proxier *Proxier) OnEndpointsAdd(endpoints *api.Endpoints) {
func (proxier *Proxier) OnEndpointsAdd(endpoints *v1.Endpoints) {
namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
if proxier.endpointsChanges.update(&namespacedName, nil, endpoints) && proxier.isInitialized() {
proxier.syncRunner.Run()
}
}
func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) {
namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
if proxier.endpointsChanges.update(&namespacedName, oldEndpoints, endpoints) && proxier.isInitialized() {
proxier.syncRunner.Run()
}
}
func (proxier *Proxier) OnEndpointsDelete(endpoints *api.Endpoints) {
func (proxier *Proxier) OnEndpointsDelete(endpoints *v1.Endpoints) {
namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
if proxier.endpointsChanges.update(&namespacedName, endpoints, nil) && proxier.isInitialized() {
proxier.syncRunner.Run()
@ -852,7 +867,7 @@ func getLocalIPs(endpointsMap proxyEndpointsMap) map[types.NamespacedName]sets.S
// This function is used for incremental updated of endpointsMap.
//
// NOTE: endpoints object should NOT be modified.
func endpointsToEndpointsMap(endpoints *api.Endpoints, hostname string) proxyEndpointsMap {
func endpointsToEndpointsMap(endpoints *v1.Endpoints, hostname string) proxyEndpointsMap {
if endpoints == nil {
return nil
}
@ -865,7 +880,7 @@ func endpointsToEndpointsMap(endpoints *api.Endpoints, hostname string) proxyEnd
for i := range ss.Ports {
port := &ss.Ports[i]
if port.Port == 0 {
glog.Warningf("ignoring invalid endpoint port %s", port.Name)
klog.Warningf("ignoring invalid endpoint port %s", port.Name)
continue
}
svcPortName := proxy.ServicePortName{
@ -875,19 +890,19 @@ func endpointsToEndpointsMap(endpoints *api.Endpoints, hostname string) proxyEnd
for i := range ss.Addresses {
addr := &ss.Addresses[i]
if addr.IP == "" {
glog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name)
klog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name)
continue
}
isLocal := addr.NodeName != nil && *addr.NodeName == hostname
epInfo := newEndpointInfo(addr.IP, uint16(port.Port), isLocal)
endpointsMap[svcPortName] = append(endpointsMap[svcPortName], epInfo)
}
if glog.V(3) {
if klog.V(3) {
newEPList := []*endpointsInfo{}
for _, ep := range endpointsMap[svcPortName] {
newEPList = append(newEPList, ep)
}
glog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList)
klog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList)
}
}
}
@ -897,7 +912,7 @@ func endpointsToEndpointsMap(endpoints *api.Endpoints, hostname string) proxyEnd
// Translates single Service object to proxyServiceMap.
//
// NOTE: service object should NOT be modified.
func serviceToServiceMap(service *api.Service) proxyServiceMap {
func serviceToServiceMap(service *v1.Service) proxyServiceMap {
if service == nil {
return nil
}
@ -924,11 +939,11 @@ func (proxier *Proxier) syncProxyRules() {
start := time.Now()
defer func() {
SyncProxyRulesLatency.Observe(sinceInMicroseconds(start))
glog.V(4).Infof("syncProxyRules took %v", time.Since(start))
klog.V(4).Infof("syncProxyRules took %v", time.Since(start))
}()
// don't sync rules till we've received services and endpoints
if !proxier.endpointsSynced || !proxier.servicesSynced {
glog.V(2).Info("Not syncing hns until Services and Endpoints have been received from master")
klog.V(2).Info("Not syncing hns until Services and Endpoints have been received from master")
return
}
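
syncProxyRules opens with the usual measure-on-defer pattern: capture start once, and let a deferred closure record the latency on every exit path, including the early return above. The pattern in isolation:

package main

import (
	"fmt"
	"time"
)

// syncOnce shows the defer-based timing used by syncProxyRules; the deferred
// closure runs on every return path, so all exits are measured.
func syncOnce() {
	start := time.Now()
	defer func() {
		fmt.Printf("sync took %v\n", time.Since(start))
	}()
	time.Sleep(10 * time.Millisecond) // stand-in for the real rule programming
}

func main() { syncOnce() }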
@ -941,29 +956,37 @@ func (proxier *Proxier) syncProxyRules() {
staleServices := serviceUpdateResult.staleServices
// merge stale services gathered from updateEndpointsMap
for svcPortName := range endpointUpdateResult.staleServiceNames {
if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.protocol == api.ProtocolUDP {
glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.clusterIP.String())
if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.protocol == v1.ProtocolUDP {
klog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.clusterIP.String())
staleServices.Insert(svcInfo.clusterIP.String())
}
}
glog.V(3).Infof("Syncing Policies")
klog.V(3).Infof("Syncing Policies")
// Program HNS by adding corresponding policies for each service.
for svcName, svcInfo := range proxier.serviceMap {
if svcInfo.policyApplied {
glog.V(4).Infof("Policy already applied for %s", spew.Sdump(svcInfo))
klog.V(4).Infof("Policy already applied for %s", spew.Sdump(svcInfo))
continue
}
var hnsEndpoints []hcsshim.HNSEndpoint
glog.V(4).Infof("====Applying Policy for %s====", svcName)
klog.V(4).Infof("====Applying Policy for %s====", svcName)
// Create Remote endpoints for every endpoint, corresponding to the service
for _, ep := range proxier.endpointsMap[svcName] {
var newHnsEndpoint *hcsshim.HNSEndpoint
hnsNetworkName := proxier.network.name
var err error
// targetPort is zero if it is specified as a name in port.TargetPort, so the real port should be taken from the endpoints.
// Note that hcsshim.AddLoadBalancer() doesn't support endpoints with different ports, so only the port from the first endpoint is used.
// TODO(feiskyer): add support of different endpoint ports after hcsshim.AddLoadBalancer() add that.
if svcInfo.targetPort == 0 {
svcInfo.targetPort = int(ep.port)
}
if len(ep.hnsID) > 0 {
newHnsEndpoint, err = hcsshim.GetHNSEndpointByID(ep.hnsID)
}
@ -977,13 +1000,13 @@ func (proxier *Proxier) syncProxyRules() {
if newHnsEndpoint == nil {
if ep.isLocal {
glog.Errorf("Local endpoint not found for %v: err: %v on network %s", ep.ip, err, hnsNetworkName)
klog.Errorf("Local endpoint not found for %v: err: %v on network %s", ep.ip, err, hnsNetworkName)
continue
}
// hns Endpoint resource was not found, create one
hnsnetwork, err := hcsshim.GetHNSNetworkByName(hnsNetworkName)
if err != nil {
glog.Errorf("%v", err)
klog.Errorf("%v", err)
continue
}
@ -994,7 +1017,7 @@ func (proxier *Proxier) syncProxyRules() {
newHnsEndpoint, err = hnsnetwork.CreateRemoteEndpoint(hnsEndpoint)
if err != nil {
glog.Errorf("Remote endpoint creation failed: %v", err)
klog.Errorf("Remote endpoint creation failed: %v", err)
continue
}
}
@ -1007,19 +1030,19 @@ func (proxier *Proxier) syncProxyRules() {
Log(ep, "Endpoint resource found", 3)
}
glog.V(3).Infof("Associated endpoints [%s] for service [%s]", spew.Sdump(hnsEndpoints), svcName)
klog.V(3).Infof("Associated endpoints [%s] for service [%s]", spew.Sdump(hnsEndpoints), svcName)
if len(svcInfo.hnsID) > 0 {
// This should not happen
glog.Warningf("Load Balancer already exists %s -- Debug ", svcInfo.hnsID)
klog.Warningf("Load Balancer already exists %s -- Debug ", svcInfo.hnsID)
}
if len(hnsEndpoints) == 0 {
glog.Errorf("Endpoint information not available for service %s. Not applying any policy", svcName)
klog.Errorf("Endpoint information not available for service %s. Not applying any policy", svcName)
continue
}
glog.V(4).Infof("Trying to Apply Policies for service %s", spew.Sdump(svcInfo))
klog.V(4).Infof("Trying to Apply Policies for service %s", spew.Sdump(svcInfo))
var hnsLoadBalancer *hcsshim.PolicyList
hnsLoadBalancer, err := getHnsLoadBalancer(
@ -1031,12 +1054,12 @@ func (proxier *Proxier) syncProxyRules() {
uint16(svcInfo.port),
)
if err != nil {
glog.Errorf("Policy creation failed: %v", err)
klog.Errorf("Policy creation failed: %v", err)
continue
}
svcInfo.hnsID = hnsLoadBalancer.ID
glog.V(3).Infof("Hns LoadBalancer resource created for cluster ip resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID)
klog.V(3).Infof("Hns LoadBalancer resource created for cluster ip resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID)
// If nodePort is specified, user should be able to use nodeIP:nodePort to reach the backend endpoints
if svcInfo.nodePort > 0 {
@ -1049,12 +1072,12 @@ func (proxier *Proxier) syncProxyRules() {
uint16(svcInfo.nodePort),
)
if err != nil {
glog.Errorf("Policy creation failed: %v", err)
klog.Errorf("Policy creation failed: %v", err)
continue
}
svcInfo.nodePorthnsID = hnsLoadBalancer.ID
glog.V(3).Infof("Hns LoadBalancer resource created for nodePort resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID)
klog.V(3).Infof("Hns LoadBalancer resource created for nodePort resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID)
}
// Create a Load Balancer Policy for each external IP
@ -1069,11 +1092,11 @@ func (proxier *Proxier) syncProxyRules() {
uint16(svcInfo.port),
)
if err != nil {
glog.Errorf("Policy creation failed: %v", err)
klog.Errorf("Policy creation failed: %v", err)
continue
}
externalIp.hnsID = hnsLoadBalancer.ID
glog.V(3).Infof("Hns LoadBalancer resource created for externalIp resources %v, Id[%s]", externalIp, hnsLoadBalancer.ID)
klog.V(3).Infof("Hns LoadBalancer resource created for externalIp resources %v, Id[%s]", externalIp, hnsLoadBalancer.ID)
}
// Create a Load Balancer Policy for each loadbalancer ingress
for _, lbIngressIp := range svcInfo.loadBalancerIngressIPs {
@ -1087,11 +1110,11 @@ func (proxier *Proxier) syncProxyRules() {
uint16(svcInfo.port),
)
if err != nil {
glog.Errorf("Policy creation failed: %v", err)
klog.Errorf("Policy creation failed: %v", err)
continue
}
lbIngressIp.hnsID = hnsLoadBalancer.ID
glog.V(3).Infof("Hns LoadBalancer resource created for loadBalancer Ingress resources %v", lbIngressIp)
klog.V(3).Infof("Hns LoadBalancer resource created for loadBalancer Ingress resources %v", lbIngressIp)
}
svcInfo.policyApplied = true
Log(svcInfo, "+++Policy Successfully applied for service +++", 2)
@ -1106,17 +1129,17 @@ func (proxier *Proxier) syncProxyRules() {
// not "OnlyLocal", but the services list will not, and the healthChecker
// will just drop those endpoints.
if err := proxier.healthChecker.SyncServices(serviceUpdateResult.hcServices); err != nil {
glog.Errorf("Error syncing healtcheck services: %v", err)
klog.Errorf("Error syncing healthcheck services: %v", err)
}
if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.hcEndpoints); err != nil {
glog.Errorf("Error syncing healthcheck endoints: %v", err)
klog.Errorf("Error syncing healthcheck endpoints: %v", err)
}
// Finish housekeeping.
// TODO: these could be made more consistent.
for _, svcIP := range staleServices.UnsortedList() {
// TODO: Check if this is required to clean up stale services here
glog.V(5).Infof("Pending delete stale service IP %s connections", svcIP)
klog.V(5).Infof("Pending delete stale service IP %s connections", svcIP)
}
}

View File

@ -17,17 +17,17 @@ go_library(
],
importpath = "k8s.io/kubernetes/pkg/proxy/winuserspace",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/helper:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/util/ipconfig:go_default_library",
"//pkg/util/netsh:go_default_library",
"//pkg/util/slice:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/github.com/miekg/dns:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
@ -40,12 +40,12 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/util/netsh/testing:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
],
)

View File

@ -17,7 +17,7 @@ limitations under the License.
package winuserspace
import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/proxy"
"net"
)
@ -27,7 +27,7 @@ type LoadBalancer interface {
// NextEndpoint returns the endpoint to handle a request for the given
// service-port and source address.
NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error)
NewService(service proxy.ServicePortName, sessionAffinityType api.ServiceAffinity, stickyMaxAgeMinutes int) error
NewService(service proxy.ServicePortName, sessionAffinityType v1.ServiceAffinity, stickyMaxAgeMinutes int) error
DeleteService(service proxy.ServicePortName)
CleanupStaleStickySessions(service proxy.ServicePortName)
}
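
Any balancer the userspace proxy drives has to satisfy this four-method contract; NewLoadBalancerRR later in the diff is the real implementation. As a shape-only sketch, a trivial first-endpoint balancer against a pared-down two-method version of the interface (the service key is simplified to a string):

package main

import (
	"fmt"
	"net"
)

type loadBalancer interface {
	NextEndpoint(service string, srcAddr net.Addr, sessionAffinityReset bool) (string, error)
	DeleteService(service string)
}

// firstEndpoint always returns the first known endpoint; it only illustrates
// satisfying the contract, not a useful policy.
type firstEndpoint struct{ endpoints map[string][]string }

func (f *firstEndpoint) NextEndpoint(service string, _ net.Addr, _ bool) (string, error) {
	eps := f.endpoints[service]
	if len(eps) == 0 {
		return "", fmt.Errorf("no endpoints for %s", service)
	}
	return eps[0], nil
}

func (f *firstEndpoint) DeleteService(service string) { delete(f.endpoints, service) }

func main() {
	var lb loadBalancer = &firstEndpoint{endpoints: map[string][]string{"ns/svc:p": {"10.0.0.1:80"}}}
	ep, _ := lb.NextEndpoint("ns/svc:p", nil, false)
	fmt.Println(ep) // 10.0.0.1:80
}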

View File

@ -25,13 +25,13 @@ import (
"sync/atomic"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/runtime"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/util/netsh"
)
@ -47,12 +47,12 @@ type portal struct {
type serviceInfo struct {
isAliveAtomic int32 // Only access this with atomic ops
portal portal
protocol api.Protocol
protocol v1.Protocol
socket proxySocket
timeout time.Duration
activeClients *clientCache
dnsClients *dnsClientCache
sessionAffinityType api.ServiceAffinity
sessionAffinityType v1.ServiceAffinity
}
func (info *serviceInfo) setAlive(b bool) {
@ -70,7 +70,7 @@ func (info *serviceInfo) isAlive() bool {
func logTimeout(err error) bool {
if e, ok := err.(net.Error); ok {
if e.Timeout() {
glog.V(3).Infof("connection to endpoint closed due to inactivity")
klog.V(3).Infof("connection to endpoint closed due to inactivity")
return true
}
}
@ -100,7 +100,7 @@ var _ proxy.ProxyProvider = &Proxier{}
type portMapKey struct {
ip string
port int
protocol api.Protocol
protocol v1.Protocol
}
func (k *portMapKey) String() string {
@ -140,7 +140,7 @@ func NewProxier(loadBalancer LoadBalancer, listenIP net.IP, netsh netsh.Interfac
return nil, fmt.Errorf("failed to select a host interface: %v", err)
}
glog.V(2).Infof("Setting proxy IP to %v", hostIP)
klog.V(2).Infof("Setting proxy IP to %v", hostIP)
return createProxier(loadBalancer, listenIP, netsh, hostIP, syncPeriod, udpIdleTimeout)
}
@ -167,7 +167,7 @@ func (proxier *Proxier) SyncLoop() {
defer t.Stop()
for {
<-t.C
glog.V(6).Infof("Periodic sync")
klog.V(6).Infof("Periodic sync")
proxier.Sync()
}
}
@ -223,7 +223,7 @@ func (proxier *Proxier) setServiceInfo(service ServicePortPortalName, info *serv
// addServicePortPortal starts listening for a new service, returning the serviceInfo.
// The timeout only applies to UDP connections, for now.
func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPortalName, protocol api.Protocol, listenIP string, port int, timeout time.Duration) (*serviceInfo, error) {
func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPortalName, protocol v1.Protocol, listenIP string, port int, timeout time.Duration) (*serviceInfo, error) {
var serviceIP net.IP
if listenIP != allAvailableInterfaces {
if serviceIP = net.ParseIP(listenIP); serviceIP == nil {
@ -234,7 +234,7 @@ func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPo
if existed, err := proxier.netsh.EnsureIPAddress(args, serviceIP); err != nil {
return nil, err
} else if !existed {
glog.V(3).Infof("Added ip address to fowarder interface for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol)
klog.V(3).Infof("Added ip address to fowarder interface for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol)
}
}
@ -255,11 +255,11 @@ func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPo
timeout: timeout,
activeClients: newClientCache(),
dnsClients: newDNSClientCache(),
sessionAffinityType: api.ServiceAffinityNone, // default
sessionAffinityType: v1.ServiceAffinityNone, // default
}
proxier.setServiceInfo(servicePortPortalName, si)
glog.V(2).Infof("Proxying for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol)
klog.V(2).Infof("Proxying for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol)
go func(service ServicePortPortalName, proxier *Proxier) {
defer runtime.HandleCrash()
atomic.AddInt32(&proxier.numProxyLoops, 1)
@ -288,7 +288,7 @@ func (proxier *Proxier) closeServicePortPortal(servicePortPortalName ServicePort
}
// getListenIPPortMap returns a slice of all listen IPs for a service.
func getListenIPPortMap(service *api.Service, listenPort int, nodePort int) map[string]int {
func getListenIPPortMap(service *v1.Service, listenPort int, nodePort int) map[string]int {
listenIPPortMap := make(map[string]int)
listenIPPortMap[service.Spec.ClusterIP] = listenPort
@ -307,13 +307,13 @@ func getListenIPPortMap(service *api.Service, listenPort int, nodePort int) map[
return listenIPPortMap
}
func (proxier *Proxier) mergeService(service *api.Service) map[ServicePortPortalName]bool {
func (proxier *Proxier) mergeService(service *v1.Service) map[ServicePortPortalName]bool {
if service == nil {
return nil
}
svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if !helper.IsServiceIPSet(service) {
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
return nil
}
existingPortPortals := make(map[ServicePortPortalName]bool)
@ -337,19 +337,19 @@ func (proxier *Proxier) mergeService(service *api.Service) map[ServicePortPortal
continue
}
if exists {
glog.V(4).Infof("Something changed for service %q: stopping it", servicePortPortalName)
klog.V(4).Infof("Something changed for service %q: stopping it", servicePortPortalName)
if err := proxier.closeServicePortPortal(servicePortPortalName, info); err != nil {
glog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err)
klog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err)
}
}
glog.V(1).Infof("Adding new service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(listenPort)), protocol)
klog.V(1).Infof("Adding new service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(listenPort)), protocol)
info, err := proxier.addServicePortPortal(servicePortPortalName, protocol, listenIP, listenPort, proxier.udpIdleTimeout)
if err != nil {
glog.Errorf("Failed to start proxy for %q: %v", servicePortPortalName, err)
klog.Errorf("Failed to start proxy for %q: %v", servicePortPortalName, err)
continue
}
info.sessionAffinityType = service.Spec.SessionAffinity
glog.V(10).Infof("info: %#v", info)
klog.V(10).Infof("info: %#v", info)
}
if len(listenIPPortMap) > 0 {
// only one loadbalancer per service port portal
@ -361,7 +361,7 @@ func (proxier *Proxier) mergeService(service *api.Service) map[ServicePortPortal
Port: servicePort.Name,
}
timeoutSeconds := 0
if service.Spec.SessionAffinity == api.ServiceAffinityClientIP {
if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP {
timeoutSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
}
proxier.loadBalancer.NewService(servicePortName, service.Spec.SessionAffinity, timeoutSeconds)
@ -371,13 +371,13 @@ func (proxier *Proxier) mergeService(service *api.Service) map[ServicePortPortal
return existingPortPortals
}
func (proxier *Proxier) unmergeService(service *api.Service, existingPortPortals map[ServicePortPortalName]bool) {
func (proxier *Proxier) unmergeService(service *v1.Service, existingPortPortals map[ServicePortPortalName]bool) {
if service == nil {
return
}
svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if !helper.IsServiceIPSet(service) {
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
return
}
@ -409,15 +409,15 @@ func (proxier *Proxier) unmergeService(service *api.Service, existingPortPortals
continue
}
glog.V(1).Infof("Stopping service %q", servicePortPortalName)
klog.V(1).Infof("Stopping service %q", servicePortPortalName)
info, exists := proxier.getServiceInfo(servicePortPortalName)
if !exists {
glog.Errorf("Service %q is being removed but doesn't exist", servicePortPortalName)
klog.Errorf("Service %q is being removed but doesn't exist", servicePortPortalName)
continue
}
if err := proxier.closeServicePortPortal(servicePortPortalName, info); err != nil {
glog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err)
klog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err)
}
}
@ -428,23 +428,23 @@ func (proxier *Proxier) unmergeService(service *api.Service, existingPortPortals
}
}
func (proxier *Proxier) OnServiceAdd(service *api.Service) {
func (proxier *Proxier) OnServiceAdd(service *v1.Service) {
_ = proxier.mergeService(service)
}
func (proxier *Proxier) OnServiceUpdate(oldService, service *api.Service) {
func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) {
existingPortPortals := proxier.mergeService(service)
proxier.unmergeService(oldService, existingPortPortals)
}
func (proxier *Proxier) OnServiceDelete(service *api.Service) {
func (proxier *Proxier) OnServiceDelete(service *v1.Service) {
proxier.unmergeService(service, map[ServicePortPortalName]bool{})
}
func (proxier *Proxier) OnServiceSynced() {
}
func sameConfig(info *serviceInfo, service *api.Service, protocol api.Protocol, listenPort int) bool {
func sameConfig(info *serviceInfo, service *v1.Service, protocol v1.Protocol, listenPort int) bool {
return info.protocol == protocol && info.portal.port == listenPort && info.sessionAffinityType == service.Spec.SessionAffinity
}

View File

@ -29,10 +29,10 @@ import (
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
netshtest "k8s.io/kubernetes/pkg/util/netsh/testing"
)
@ -241,11 +241,11 @@ func getPortNum(t *testing.T, addr string) int {
func TestTCPProxy(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
@ -268,11 +268,11 @@ func TestTCPProxy(t *testing.T) {
func TestUDPProxy(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
@ -295,11 +295,11 @@ func TestUDPProxy(t *testing.T) {
func TestUDPProxyTimeout(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
@ -327,18 +327,18 @@ func TestMultiPortProxy(t *testing.T) {
lb := NewLoadBalancerRR()
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-p"}, Port: "p"}
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-q"}, Port: "q"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Protocol: "TCP", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Protocol: "TCP", Port: tcpServerPort}},
}},
})
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceQ.Name, Namespace: serviceQ.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "q", Protocol: "UDP", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "q", Protocol: "UDP", Port: udpServerPort}},
}},
})
@ -379,9 +379,9 @@ func TestMultiPortOnServiceAdd(t *testing.T) {
}
waitForNumProxyLoops(t, p, 0)
p.OnServiceAdd(&api.Service{
p.OnServiceAdd(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Spec: api.ServiceSpec{ClusterIP: "0.0.0.0", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "0.0.0.0", Ports: []v1.ServicePort{{
Name: "p",
Port: 0,
Protocol: "TCP",
@ -430,11 +430,11 @@ func stopProxyByName(proxier *Proxier, service ServicePortPortalName) error {
func TestTCPProxyStop(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
@ -474,11 +474,11 @@ func TestTCPProxyStop(t *testing.T) {
func TestUDPProxyStop(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
@ -512,11 +512,11 @@ func TestUDPProxyStop(t *testing.T) {
func TestTCPProxyUpdateDelete(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
@ -540,9 +540,9 @@ func TestTCPProxyUpdateDelete(t *testing.T) {
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
p.OnServiceDelete(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
Name: "p",
Port: int32(getPortNum(t, svcInfo.socket.Addr().String())),
Protocol: "TCP",
@ -557,11 +557,11 @@ func TestTCPProxyUpdateDelete(t *testing.T) {
func TestUDPProxyUpdateDelete(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
@ -584,9 +584,9 @@ func TestUDPProxyUpdateDelete(t *testing.T) {
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
p.OnServiceDelete(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
Name: "p",
Port: int32(getPortNum(t, svcInfo.socket.Addr().String())),
Protocol: "UDP",
@ -601,11 +601,11 @@ func TestUDPProxyUpdateDelete(t *testing.T) {
func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
endpoint := &api.Endpoints{
endpoint := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
}
lb.OnEndpointsAdd(endpoint)
@ -629,9 +629,9 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
p.OnServiceDelete(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
Name: "p",
Port: int32(getPortNum(t, svcInfo.socket.Addr().String())),
Protocol: "TCP",
@ -644,9 +644,9 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
// need to add the endpoint here because it got cleaned up during service delete
lb.OnEndpointsAdd(endpoint)
p.OnServiceAdd(&api.Service{
p.OnServiceAdd(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
Name: "p",
Port: int32(getPortNum(t, svcInfo.socket.Addr().String())),
Protocol: "TCP",
@ -663,11 +663,11 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
endpoint := &api.Endpoints{
endpoint := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
}
lb.OnEndpointsAdd(endpoint)
@ -691,9 +691,9 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
conn.Close()
waitForNumProxyLoops(t, p, 1)
p.OnServiceDelete(&api.Service{
p.OnServiceDelete(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
Name: "p",
Port: int32(getPortNum(t, svcInfo.socket.Addr().String())),
Protocol: "UDP",
@ -706,9 +706,9 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
// need to add the endpoint here because it got cleaned up during service delete
lb.OnEndpointsAdd(endpoint)
p.OnServiceAdd(&api.Service{
p.OnServiceAdd(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
Name: "p",
Port: int32(getPortNum(t, svcInfo.socket.Addr().String())),
Protocol: "UDP",
@ -725,11 +725,11 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
func TestTCPProxyUpdatePort(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
@ -748,9 +748,9 @@ func TestTCPProxyUpdatePort(t *testing.T) {
testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
waitForNumProxyLoops(t, p, 1)
p.OnServiceAdd(&api.Service{
p.OnServiceAdd(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
Name: "p",
Port: 0,
Protocol: "TCP",
@ -773,11 +773,11 @@ func TestTCPProxyUpdatePort(t *testing.T) {
func TestUDPProxyUpdatePort(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
}},
})
@ -795,9 +795,9 @@ func TestUDPProxyUpdatePort(t *testing.T) {
}
waitForNumProxyLoops(t, p, 1)
p.OnServiceAdd(&api.Service{
p.OnServiceAdd(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
Name: "p",
Port: 0,
Protocol: "UDP",
@ -818,11 +818,11 @@ func TestUDPProxyUpdatePort(t *testing.T) {
func TestProxyUpdatePublicIPs(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
lb.OnEndpointsAdd(&api.Endpoints{
lb.OnEndpointsAdd(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
})
@ -841,10 +841,10 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
waitForNumProxyLoops(t, p, 1)
p.OnServiceAdd(&api.Service{
p.OnServiceAdd(&v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.portal.port),
Protocol: "TCP",
@ -870,11 +870,11 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
func TestProxyUpdatePortal(t *testing.T) {
lb := NewLoadBalancerRR()
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
endpoint := &api.Endpoints{
endpoint := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
}},
}
lb.OnEndpointsAdd(endpoint)
@ -894,18 +894,18 @@ func TestProxyUpdatePortal(t *testing.T) {
testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
waitForNumProxyLoops(t, p, 1)
svcv0 := &api.Service{
svcv0 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.portal.port),
Protocol: "TCP",
}}},
}
svcv1 := &api.Service{
svcv1 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.portal.port),
Protocol: "TCP",
@ -918,9 +918,9 @@ func TestProxyUpdatePortal(t *testing.T) {
t.Fatalf("service with empty ClusterIP should not be included in the proxy")
}
svcv2 := &api.Service{
svcv2 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: "None", Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: "None", Ports: []v1.ServicePort{{
Name: "p",
Port: int32(getPortNum(t, svcInfo.socket.Addr().String())),
Protocol: "TCP",
@ -932,9 +932,9 @@ func TestProxyUpdatePortal(t *testing.T) {
t.Fatalf("service with 'None' as ClusterIP should not be included in the proxy")
}
svcv3 := &api.Service{
svcv3 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Spec: api.ServiceSpec{ClusterIP: listenIP, Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
Name: "p",
Port: int32(svcInfo.portal.port),
Protocol: "TCP",

View File

@ -26,11 +26,11 @@ import (
"sync/atomic"
"time"
"github.com/golang/glog"
"github.com/miekg/dns"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/util/ipconfig"
"k8s.io/utils/exec"
@ -78,7 +78,7 @@ type proxySocket interface {
ListenPort() int
}
func newProxySocket(protocol api.Protocol, ip net.IP, port int) (proxySocket, error) {
func newProxySocket(protocol v1.Protocol, ip net.IP, port int) (proxySocket, error) {
host := ""
if ip != nil {
host = ip.String()
@ -101,6 +101,8 @@ func newProxySocket(protocol api.Protocol, ip net.IP, port int) (proxySocket, er
return nil, err
}
return &udpProxySocket{UDPConn: conn, port: port}, nil
case "SCTP":
return nil, fmt.Errorf("SCTP is not supported for user space proxy")
}
return nil, fmt.Errorf("unknown protocol %q", protocol)
}
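
newProxySocket now rejects SCTP explicitly instead of falling through to the generic unknown-protocol error, since the userspace path can only splice TCP and UDP. A standalone sketch of the same dispatch, with the sockets reduced to their Close-able handles:

package main

import (
	"fmt"
	"io"
	"net"
	"strconv"
)

// openSocket mirrors the protocol switch of newProxySocket above.
func openSocket(protocol string, ip net.IP, port int) (io.Closer, error) {
	host := ""
	if ip != nil {
		host = ip.String()
	}
	addr := net.JoinHostPort(host, strconv.Itoa(port))
	switch protocol {
	case "TCP":
		return net.Listen("tcp", addr)
	case "UDP":
		laddr, err := net.ResolveUDPAddr("udp", addr)
		if err != nil {
			return nil, err
		}
		return net.ListenUDP("udp", laddr)
	case "SCTP":
		return nil, fmt.Errorf("SCTP is not supported for user space proxy")
	}
	return nil, fmt.Errorf("unknown protocol %q", protocol)
}

func main() {
	_, err := openSocket("SCTP", net.ParseIP("127.0.0.1"), 0)
	fmt.Println(err)
}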
@ -131,10 +133,10 @@ func tryConnect(service ServicePortPortalName, srcAddr net.Addr, protocol string
}
endpoint, err := proxier.loadBalancer.NextEndpoint(servicePortName, srcAddr, sessionAffinityReset)
if err != nil {
glog.Errorf("Couldn't find an endpoint for %s: %v", service, err)
klog.Errorf("Couldn't find an endpoint for %s: %v", service, err)
return nil, err
}
glog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint)
klog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint)
// TODO: This could spin up a new goroutine to make the outbound connection,
// and keep accepting inbound traffic.
outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout)
@ -142,7 +144,7 @@ func tryConnect(service ServicePortPortalName, srcAddr net.Addr, protocol string
if isTooManyFDsError(err) {
panic("Dial failed: " + err.Error())
}
glog.Errorf("Dial failed: %v", err)
klog.Errorf("Dial failed: %v", err)
sessionAffinityReset = true
continue
}
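
tryConnect couples dialing with the balancer's affinity state: when a dial to the sticky endpoint fails, sessionAffinityReset is flipped so the next NextEndpoint call is free to pick a different backend. The retry loop in miniature, with pick standing in for NextEndpoint:

package main

import (
	"fmt"
	"net"
	"time"
)

// dialWithReset sketches tryConnect's retry idea: abandon the sticky endpoint
// after a dial failure by passing reset=true to the next pick.
func dialWithReset(pick func(reset bool) string, attempts int) (net.Conn, error) {
	reset := false
	for i := 0; i < attempts; i++ {
		conn, err := net.DialTimeout("tcp", pick(reset), 250*time.Millisecond)
		if err != nil {
			reset = true
			continue
		}
		return conn, nil
	}
	return nil, fmt.Errorf("all attempts failed")
}

func main() {
	_, err := dialWithReset(func(bool) string { return "127.0.0.1:1" }, 2)
	fmt.Println(err)
}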
@ -171,13 +173,13 @@ func (tcp *tcpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv
// Then the service port was just closed so the accept failure is to be expected.
return
}
glog.Errorf("Accept failed: %v", err)
klog.Errorf("Accept failed: %v", err)
continue
}
glog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
klog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
outConn, err := tryConnect(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", proxier)
if err != nil {
glog.Errorf("Failed to connect to balancer: %v", err)
klog.Errorf("Failed to connect to balancer: %v", err)
inConn.Close()
continue
}
@ -190,7 +192,7 @@ func (tcp *tcpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv
func proxyTCP(in, out *net.TCPConn) {
var wg sync.WaitGroup
wg.Add(2)
glog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
klog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
go copyBytes("from backend", in, out, &wg)
go copyBytes("to backend", out, in, &wg)
@ -199,14 +201,14 @@ func proxyTCP(in, out *net.TCPConn) {
func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) {
defer wg.Done()
glog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
klog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
n, err := io.Copy(dest, src)
if err != nil {
if !isClosedError(err) {
glog.Errorf("I/O error: %v", err)
klog.Errorf("I/O error: %v", err)
}
}
glog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
klog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
dest.Close()
src.Close()
}
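
proxyTCP and copyBytes implement the classic bidirectional splice: one goroutine per direction, each copying until EOF or error and then closing both ends so the opposite copy unblocks. The essence, stripped of logging:

package main

import (
	"io"
	"net"
	"sync"
)

// splice mirrors proxyTCP/copyBytes above; closing both conns after either
// direction finishes also terminates the opposite io.Copy.
func splice(in, out *net.TCPConn) {
	var wg sync.WaitGroup
	wg.Add(2)
	copyDir := func(dst, src *net.TCPConn) {
		defer wg.Done()
		io.Copy(dst, src) // errors intentionally ignored in this sketch
		dst.Close()
		src.Close()
	}
	go copyDir(in, out) // from backend
	go copyDir(out, in) // to backend
	wg.Wait()
}

func main() { _ = splice } // compile-only stub; wiring real TCP conns is omitted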
@ -281,7 +283,7 @@ func appendDNSSuffix(msg *dns.Msg, buffer []byte, length int, dnsSuffix string)
msg.Question[0].Name = origName
if err != nil {
glog.Warningf("Unable to pack DNS packet. Error is: %v", err)
klog.Warningf("Unable to pack DNS packet. Error is: %v", err)
return length, err
}
@ -308,7 +310,7 @@ func recoverDNSQuestion(origName string, msg *dns.Msg, buffer []byte, length int
mbuf, err := msg.PackBuffer(buffer)
if err != nil {
glog.Warningf("Unable to pack DNS packet. Error is: %v", err)
klog.Warningf("Unable to pack DNS packet. Error is: %v", err)
return length, err
}
@ -328,7 +330,7 @@ func processUnpackedDNSQueryPacket(
length int,
dnsSearch []string) int {
if dnsSearch == nil || len(dnsSearch) == 0 {
glog.V(1).Infof("DNS search list is not initialized and is empty.")
klog.V(1).Infof("DNS search list is not initialized and is empty.")
return length
}
@ -346,13 +348,13 @@ func processUnpackedDNSQueryPacket(
state.msg.MsgHdr.Id = msg.MsgHdr.Id
if index < 0 || index >= int32(len(dnsSearch)) {
glog.V(1).Infof("Search index %d is out of range.", index)
klog.V(1).Infof("Search index %d is out of range.", index)
return length
}
length, err := appendDNSSuffix(msg, buffer, length, dnsSearch[index])
if err != nil {
glog.Errorf("Append DNS suffix failed: %v", err)
klog.Errorf("Append DNS suffix failed: %v", err)
}
return length
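
The DNS interception here emulates a resolver search list for Windows pods: an outbound single-question query gets a search-list suffix appended (indexed per client so each retry advances to the next suffix), and the matching response is rewritten back to the original name before it reaches the client. A sketch of the append step against the same github.com/miekg/dns package:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

// appendSuffix sketches the core of appendDNSSuffix above: rewrite the single
// question name with one search-list suffix and repack the message.
func appendSuffix(msg *dns.Msg, suffix string) ([]byte, error) {
	if len(msg.Question) != 1 {
		return nil, fmt.Errorf("suffix appending supports exactly one question")
	}
	base := msg.Question[0].Name // FQDN form, e.g. "kubernetes."
	msg.Question[0].Name = dns.Fqdn(base[:len(base)-1] + "." + suffix)
	return msg.Pack()
}

func main() {
	q := new(dns.Msg)
	q.SetQuestion(dns.Fqdn("kubernetes"), dns.TypeA)
	_, err := appendSuffix(q, "default.svc.cluster.local")
	fmt.Println(q.Question[0].Name, err) // kubernetes.default.svc.cluster.local. <nil>
}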
@ -371,7 +373,7 @@ func processUnpackedDNSResponsePacket(
var drop bool
var err error
if dnsSearch == nil || len(dnsSearch) == 0 {
glog.V(1).Infof("DNS search list is not initialized and is empty.")
klog.V(1).Infof("DNS search list is not initialized and is empty.")
return drop, length
}
@ -387,19 +389,19 @@ func processUnpackedDNSResponsePacket(
drop = true
length, err = appendDNSSuffix(state.msg, buffer, length, dnsSearch[index])
if err != nil {
glog.Errorf("Append DNS suffix failed: %v", err)
klog.Errorf("Append DNS suffix failed: %v", err)
}
_, err = svrConn.Write(buffer[0:length])
if err != nil {
if !logTimeout(err) {
glog.Errorf("Write failed: %v", err)
klog.Errorf("Write failed: %v", err)
}
}
} else {
length, err = recoverDNSQuestion(state.msg.Question[0].Name, msg, buffer, length)
if err != nil {
glog.Errorf("Recover DNS question failed: %v", err)
klog.Errorf("Recover DNS question failed: %v", err)
}
dnsClients.mu.Lock()
@ -419,7 +421,7 @@ func processDNSQueryPacket(
dnsSearch []string) (int, error) {
msg := &dns.Msg{}
if err := msg.Unpack(buffer[:length]); err != nil {
glog.Warningf("Unable to unpack DNS packet. Error is: %v", err)
klog.Warningf("Unable to unpack DNS packet. Error is: %v", err)
return length, err
}
@ -430,14 +432,14 @@ func processDNSQueryPacket(
// QDCOUNT
if len(msg.Question) != 1 {
glog.V(1).Infof("Number of entries in the question section of the DNS packet is: %d", len(msg.Question))
glog.V(1).Infof("DNS suffix appending does not support more than one question.")
klog.V(1).Infof("Number of entries in the question section of the DNS packet is: %d", len(msg.Question))
klog.V(1).Infof("DNS suffix appending does not support more than one question.")
return length, nil
}
// ANCOUNT, NSCOUNT, ARCOUNT
if len(msg.Answer) != 0 || len(msg.Ns) != 0 || len(msg.Extra) != 0 {
glog.V(1).Infof("DNS packet contains more than question section.")
klog.V(1).Infof("DNS packet contains more than question section.")
return length, nil
}
@ -446,7 +448,7 @@ func processDNSQueryPacket(
if packetRequiresDNSSuffix(dnsQType, dnsQClass) {
host, _, err := net.SplitHostPort(cliAddr.String())
if err != nil {
glog.V(1).Infof("Failed to get host from client address: %v", err)
klog.V(1).Infof("Failed to get host from client address: %v", err)
host = cliAddr.String()
}
@ -466,7 +468,7 @@ func processDNSResponsePacket(
var drop bool
msg := &dns.Msg{}
if err := msg.Unpack(buffer[:length]); err != nil {
glog.Warningf("Unable to unpack DNS packet. Error is: %v", err)
klog.Warningf("Unable to unpack DNS packet. Error is: %v", err)
return drop, length, err
}
@ -477,7 +479,7 @@ func processDNSResponsePacket(
// QDCOUNT
if len(msg.Question) != 1 {
glog.V(1).Infof("Number of entries in the response section of the DNS packet is: %d", len(msg.Answer))
klog.V(1).Infof("Number of entries in the response section of the DNS packet is: %d", len(msg.Answer))
return drop, length, nil
}
@ -486,7 +488,7 @@ func processDNSResponsePacket(
if packetRequiresDNSSuffix(dnsQType, dnsQClass) {
host, _, err := net.SplitHostPort(cliAddr.String())
if err != nil {
glog.V(1).Infof("Failed to get host from client address: %v", err)
klog.V(1).Infof("Failed to get host from client address: %v", err)
host = cliAddr.String()
}
@ -503,7 +505,7 @@ func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv
dnsSearch = []string{"", namespaceServiceDomain, serviceDomain, clusterDomain}
execer := exec.New()
ipconfigInterface := ipconfig.New(execer)
suffixList, err := ipconfigInterface.GetDnsSuffixSearchList()
suffixList, err := ipconfigInterface.GetDNSSuffixSearchList()
if err == nil {
for _, suffix := range suffixList {
dnsSearch = append(dnsSearch, suffix)
@ -523,11 +525,11 @@ func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv
if err != nil {
if e, ok := err.(net.Error); ok {
if e.Temporary() {
glog.V(1).Infof("ReadFrom had a temporary failure: %v", err)
klog.V(1).Infof("ReadFrom had a temporary failure: %v", err)
continue
}
}
glog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err)
klog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err)
break
}
@ -535,7 +537,7 @@ func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv
if isDNSService(service.Port) {
n, err = processDNSQueryPacket(myInfo.dnsClients, cliAddr, buffer[:], n, dnsSearch)
if err != nil {
glog.Errorf("Process DNS query packet failed: %v", err)
klog.Errorf("Process DNS query packet failed: %v", err)
}
}
@ -549,14 +551,14 @@ func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv
_, err = svrConn.Write(buffer[0:n])
if err != nil {
if !logTimeout(err) {
glog.Errorf("Write failed: %v", err)
klog.Errorf("Write failed: %v", err)
// TODO: Maybe tear down the goroutine for this client/server pair?
}
continue
}
err = svrConn.SetDeadline(time.Now().Add(myInfo.timeout))
if err != nil {
glog.Errorf("SetDeadline failed: %v", err)
klog.Errorf("SetDeadline failed: %v", err)
continue
}
}
@ -570,14 +572,14 @@ func (udp *udpProxySocket) getBackendConn(activeClients *clientCache, dnsClients
if !found {
// TODO: This could spin up a new goroutine to make the outbound connection,
// and keep accepting inbound traffic.
glog.V(3).Infof("New UDP connection from %s", cliAddr)
klog.V(3).Infof("New UDP connection from %s", cliAddr)
var err error
svrConn, err = tryConnect(service, cliAddr, "udp", proxier)
if err != nil {
return nil, err
}
if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil {
glog.Errorf("SetDeadline failed: %v", err)
klog.Errorf("SetDeadline failed: %v", err)
return nil, err
}
activeClients.clients[cliAddr.String()] = svrConn
@ -598,7 +600,7 @@ func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activ
n, err := svrConn.Read(buffer[0:])
if err != nil {
if !logTimeout(err) {
glog.Errorf("Read failed: %v", err)
klog.Errorf("Read failed: %v", err)
}
break
}
@ -607,20 +609,20 @@ func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activ
if isDNSService(service.Port) {
drop, n, err = processDNSResponsePacket(svrConn, dnsClients, cliAddr, buffer[:], n, dnsSearch)
if err != nil {
glog.Errorf("Process DNS response packet failed: %v", err)
klog.Errorf("Process DNS response packet failed: %v", err)
}
}
if !drop {
err = svrConn.SetDeadline(time.Now().Add(timeout))
if err != nil {
glog.Errorf("SetDeadline failed: %v", err)
klog.Errorf("SetDeadline failed: %v", err)
break
}
n, err = udp.WriteTo(buffer[0:n], cliAddr)
if err != nil {
if !logTimeout(err) {
glog.Errorf("WriteTo failed: %v", err)
klog.Errorf("WriteTo failed: %v", err)
}
break
}

View File

@ -25,9 +25,9 @@ import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/util/slice"
)
@ -46,7 +46,7 @@ type affinityState struct {
}
type affinityPolicy struct {
affinityType api.ServiceAffinity
affinityType v1.ServiceAffinity
affinityMap map[string]*affinityState // map client IP -> affinity info
ttlSeconds int
}
@ -66,7 +66,7 @@ type balancerState struct {
affinity affinityPolicy
}
func newAffinityPolicy(affinityType api.ServiceAffinity, ttlSeconds int) *affinityPolicy {
func newAffinityPolicy(affinityType v1.ServiceAffinity, ttlSeconds int) *affinityPolicy {
return &affinityPolicy{
affinityType: affinityType,
affinityMap: make(map[string]*affinityState),
@ -81,8 +81,8 @@ func NewLoadBalancerRR() *LoadBalancerRR {
}
}
func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlSeconds int) error {
glog.V(4).Infof("LoadBalancerRR NewService %q", svcPort)
func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) error {
klog.V(4).Infof("LoadBalancerRR NewService %q", svcPort)
lb.lock.Lock()
defer lb.lock.Unlock()
lb.newServiceInternal(svcPort, affinityType, ttlSeconds)
@@ -90,14 +90,14 @@ func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType
}
// This assumes that lb.lock is already held.
func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlSeconds int) *balancerState {
func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) *balancerState {
if ttlSeconds == 0 {
ttlSeconds = int(api.DefaultClientIPServiceAffinitySeconds) //default to 3 hours if not specified. Should 0 be unlimited instead????
ttlSeconds = int(v1.DefaultClientIPServiceAffinitySeconds) //default to 3 hours if not specified. Should 0 be unlimited instead????
}
if _, exists := lb.services[svcPort]; !exists {
lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlSeconds)}
glog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort)
klog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort)
} else if affinityType != "" {
lb.services[svcPort].affinity.affinityType = affinityType
}
@@ -105,7 +105,7 @@ func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affi
}
func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
glog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort)
klog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort)
lb.lock.Lock()
defer lb.lock.Unlock()
delete(lb.services, svcPort)
@@ -114,7 +114,7 @@ func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
// return true if this service is using some form of session affinity.
func isSessionAffinity(affinity *affinityPolicy) bool {
// Should never be empty string, but checking for it to be safe.
if affinity.affinityType == "" || affinity.affinityType == api.ServiceAffinityNone {
if affinity.affinityType == "" || affinity.affinityType == v1.ServiceAffinityNone {
return false
}
return true
@@ -135,7 +135,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
if len(state.endpoints) == 0 {
return "", ErrMissingEndpoints
}
glog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints)
klog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints)
sessionAffinityEnabled := isSessionAffinity(&state.affinity)
@@ -153,7 +153,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
// Affinity wins.
endpoint := sessionAffinity.endpoint
sessionAffinity.lastUsed = time.Now()
glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint)
klog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint)
return endpoint, nil
}
}
@@ -172,7 +172,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
affinity.lastUsed = time.Now()
affinity.endpoint = endpoint
affinity.clientIP = ipaddr
glog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr])
klog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr])
}
return endpoint, nil
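Between the affinity hit above (which returns early) and the affinity-map update in this hunk, the elided lines advance a plain round-robin cursor over the endpoint list. A minimal sketch of that selection step, assuming only the fields visible in this file (a []string of endpoints plus an integer index):

package main

import "fmt"

// nextRR picks the endpoint at the cursor, then advances and wraps the cursor.
func nextRR(endpoints []string, index int) (string, int) {
    endpoint := endpoints[index]
    return endpoint, (index + 1) % len(endpoints)
}

func main() {
    eps := []string{"endpoint1:1", "endpoint2:2", "endpoint3:3"}
    idx := 0
    for i := 0; i < 4; i++ {
        var ep string
        ep, idx = nextRR(eps, idx)
        fmt.Println(ep) // endpoint1:1, endpoint2:2, endpoint3:3, endpoint1:1
    }
}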
@@ -204,7 +204,7 @@ func flattenValidEndpoints(endpoints []hostPortPair) []string {
func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) {
for _, affinity := range state.affinity.affinityMap {
if affinity.endpoint == endpoint {
glog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort)
klog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort)
delete(state.affinity.affinityMap, affinity.clientIP)
}
}
@@ -227,7 +227,7 @@ func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEn
}
for mKey, mVal := range allEndpoints {
if mVal == 1 {
glog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort)
klog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort)
removeSessionAffinityByEndpoint(state, svcPort, mKey)
}
}
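The mVal == 1 test above is the tail of a set-difference-by-counter idiom: the elided lines weight each endpoint by which set it appears in, so a count of 1 marks endpoints that dropped out of the new list and whose affinity entries must be removed. A sketch of the idiom with assumed weights (old-only = 1, new = +2, so both = 3):

package main

import "fmt"

// staleEndpoints returns endpoints present in old but absent from new, using
// the additive-weight idiom of updateAffinityMap. The weights here are an
// assumption for illustration; only the mVal == 1 branch is shown upstream.
func staleEndpoints(oldEps, newEps []string) []string {
    all := map[string]int{}
    for _, e := range oldEps {
        all[e] = 1
    }
    for _, e := range newEps {
        all[e] += 2
    }
    var stale []string
    for e, v := range all {
        if v == 1 { // seen only in the old set
            stale = append(stale, e)
        }
    }
    return stale
}

func main() {
    fmt.Println(staleEndpoints(
        []string{"endpoint1:1", "endpoint2:2"},
        []string{"endpoint2:2", "endpoint3:3"},
    )) // [endpoint1:1]
}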
@@ -235,7 +235,7 @@ func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEn
// buildPortsToEndpointsMap builds a map of portname -> all ip:ports for that
// portname. Explode Endpoints.Subsets[*] into this structure.
func buildPortsToEndpointsMap(endpoints *api.Endpoints) map[string][]hostPortPair {
func buildPortsToEndpointsMap(endpoints *v1.Endpoints) map[string][]hostPortPair {
portsToEndpoints := map[string][]hostPortPair{}
for i := range endpoints.Subsets {
ss := &endpoints.Subsets[i]
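As the comment above says, buildPortsToEndpointsMap explodes Endpoints.Subsets[*]: every address in a subset is paired with every port in that subset, grouped by port name. A small self-contained illustration of the same explosion (hostPortPair's fields are unexported upstream, so the names here are assumptions):

package main

import "fmt"

type hostPortPair struct {
    host string
    port int
}

// explodeSubset pairs every address with every port, grouped by port name,
// mirroring what buildPortsToEndpointsMap does for one EndpointSubset.
func explodeSubset(addrs []string, ports map[string]int) map[string][]hostPortPair {
    out := map[string][]hostPortPair{}
    for name, port := range ports {
        for _, addr := range addrs {
            out[name] = append(out[name], hostPortPair{host: addr, port: port})
        }
    }
    return out
}

func main() {
    m := explodeSubset([]string{"endpoint1", "endpoint2"}, map[string]int{"p": 1, "q": 2})
    fmt.Println(m["p"]) // [{endpoint1 1} {endpoint2 1}]
    fmt.Println(m["q"]) // [{endpoint1 2} {endpoint2 2}]
}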
@@ -251,7 +251,7 @@ func buildPortsToEndpointsMap(endpoints *api.Endpoints) map[string][]hostPortPai
return portsToEndpoints
}
func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *api.Endpoints) {
func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *v1.Endpoints) {
portsToEndpoints := buildPortsToEndpointsMap(endpoints)
lb.lock.Lock()
@@ -263,13 +263,13 @@ func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *api.Endpoints) {
state, exists := lb.services[svcPort]
if !exists || state == nil || len(newEndpoints) > 0 {
glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
klog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
lb.updateAffinityMap(svcPort, newEndpoints)
// OnEndpointsAdd can be called without NewService being called externally.
// To be safe we will call it here. A new service will only be created
// if one does not already exist. The affinity will be updated
// later, once NewService is called.
state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
state = lb.newServiceInternal(svcPort, v1.ServiceAffinity(""), 0)
state.endpoints = slice.ShuffleStrings(newEndpoints)
// Reset the round-robin index.
@@ -278,7 +278,7 @@ func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *api.Endpoints) {
}
}
func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) {
portsToEndpoints := buildPortsToEndpointsMap(endpoints)
oldPortsToEndpoints := buildPortsToEndpointsMap(oldEndpoints)
registeredEndpoints := make(map[proxy.ServicePortName]bool)
@@ -297,13 +297,13 @@ func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoin
}
if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints) {
glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
klog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
lb.updateAffinityMap(svcPort, newEndpoints)
// OnEndpointsUpdate can be called without NewService being called externally.
// To be safe we will call it here. A new service will only be created
// if one does not already exist. The affinity will be updated
// later, once NewService is called.
state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
state = lb.newServiceInternal(svcPort, v1.ServiceAffinity(""), 0)
state.endpoints = slice.ShuffleStrings(newEndpoints)
// Reset the round-robin index.
@@ -315,7 +315,7 @@ func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoin
for portname := range oldPortsToEndpoints {
svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
if _, exists := registeredEndpoints[svcPort]; !exists {
glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
klog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
// Reset but don't delete.
state := lb.services[svcPort]
state.endpoints = []string{}
@@ -325,7 +325,7 @@ func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoin
}
}
func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *api.Endpoints) {
func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *v1.Endpoints) {
portsToEndpoints := buildPortsToEndpointsMap(endpoints)
lb.lock.Lock()
@@ -333,7 +333,7 @@ func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *api.Endpoints) {
for portname := range portsToEndpoints {
svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
klog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort)
// If the service is still around, reset but don't delete.
if state, ok := lb.services[svcPort]; ok {
state.endpoints = []string{}
@@ -367,7 +367,7 @@ func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortNa
}
for ip, affinity := range state.affinity.affinityMap {
if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
klog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
delete(state.affinity.affinityMap, ip)
}
}
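Taken together, the surface this file now exposes over v1 types is: NewService registers affinity settings, OnEndpointsAdd/OnEndpointsUpdate/OnEndpointsDelete install endpoint lists, and NextEndpoint serves picks. A hedged usage sketch, written as if inside this package (the concrete file path is not shown in this view) and modeled directly on the tests below:

package winuserspace // assumption: the package containing LoadBalancerRR

import (
    "fmt"
    "net"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/proxy"
)

func ExampleLoadBalancerRR() {
    lb := NewLoadBalancerRR()
    svc := proxy.ServicePortName{
        NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"},
        Port:           "p",
    }

    // ClientIP stickiness with the default TTL, as in the tests below.
    lb.NewService(svc, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))

    lb.OnEndpointsAdd(&v1.Endpoints{
        ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "testnamespace"},
        Subsets: []v1.EndpointSubset{{
            Addresses: []v1.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}},
            Ports:     []v1.EndpointPort{{Name: "p", Port: 80}},
        }},
    })

    client := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
    ep, err := lb.NextEndpoint(svc, client, false)
    fmt.Println(ep, err) // e.g. endpoint1:80 <nil>; repeat calls from this client stick to it
}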

View File

@@ -20,9 +20,9 @@ import (
"net"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
)
@@ -104,11 +104,11 @@ func TestLoadBalanceWorksWithSingleEndpoint(t *testing.T) {
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints := &api.Endpoints{
endpoints := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
Ports: []api.EndpointPort{{Name: "p", Port: 40}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "endpoint1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 40}},
}},
}
loadBalancer.OnEndpointsAdd(endpoints)
@@ -141,11 +141,11 @@ func TestLoadBalanceWorksWithMultipleEndpoints(t *testing.T) {
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints := &api.Endpoints{
endpoints := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}},
}},
}
loadBalancer.OnEndpointsAdd(endpoints)
@@ -168,16 +168,16 @@ func TestLoadBalanceWorksWithMultipleEndpointsMultiplePorts(t *testing.T) {
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints := &api.Endpoints{
endpoints := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}},
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}},
Addresses: []v1.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
Ports: []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}},
Addresses: []v1.EndpointAddress{{IP: "endpoint3"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}},
},
},
}
@@ -210,20 +210,20 @@ func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpointsv1 := &api.Endpoints{
endpointsv1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}},
Addresses: []v1.EndpointAddress{{IP: "endpoint1"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint2"}},
Ports: []api.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}},
Addresses: []v1.EndpointAddress{{IP: "endpoint2"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
Ports: []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}},
Addresses: []v1.EndpointAddress{{IP: "endpoint3"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}},
},
},
}
@@ -249,16 +249,16 @@ func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
// Then update the configuration with one fewer endpoints, make sure
// we start in the beginning again
endpointsv2 := &api.Endpoints{
endpointsv2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint4"}},
Ports: []api.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}},
Addresses: []v1.EndpointAddress{{IP: "endpoint4"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}},
},
{
Addresses: []api.EndpointAddress{{IP: "endpoint5"}},
Ports: []api.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}},
Addresses: []v1.EndpointAddress{{IP: "endpoint5"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}},
},
},
}
@@ -283,7 +283,7 @@ func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
// Clear endpoints
endpointsv3 := &api.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil}
endpointsv3 := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil}
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
endpoint, err = loadBalancer.NextEndpoint(serviceP, nil, false)
@@ -300,21 +300,21 @@ func TestLoadBalanceWorksWithServiceRemoval(t *testing.T) {
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
endpoints1 := &api.Endpoints{
endpoints1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: fooServiceP.Name, Namespace: fooServiceP.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}},
Ports: []api.EndpointPort{{Name: "p", Port: 123}},
Addresses: []v1.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 123}},
},
},
}
endpoints2 := &api.Endpoints{
endpoints2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: barServiceP.Name, Namespace: barServiceP.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}},
Ports: []api.EndpointPort{{Name: "p", Port: 456}},
Addresses: []v1.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}},
Ports: []v1.EndpointPort{{Name: "p", Port: 456}},
},
},
}
@@ -357,13 +357,13 @@ func TestStickyLoadBalanceWorksWithNewServiceCalledFirst(t *testing.T) {
}
// Call NewService() before OnEndpointsUpdate()
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints := &api.Endpoints{
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
endpoints := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
Subsets: []v1.EndpointSubset{
{Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
{Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
{Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, Ports: []v1.EndpointPort{{Port: 3}}},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
@@ -413,15 +413,15 @@ func TestStickyLoadBalanceWorksWithNewServiceCalledSecond(t *testing.T) {
}
// Call OnEndpointsUpdate() before NewService()
endpoints := &api.Endpoints{
endpoints := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
Subsets: []v1.EndpointSubset{
{Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
{Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
@@ -473,13 +473,13 @@ func TestStickyLoadBalanaceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
t.Errorf("Didn't fail with non-existent service")
}
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpointsv1 := &api.Endpoints{
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
endpointsv1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
},
},
}
@@ -494,12 +494,12 @@ func TestStickyLoadBalanaceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
client3Endpoint := shuffledEndpoints[2]
endpointsv2 := &api.Endpoints{
endpointsv2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}},
},
},
}
@@ -516,12 +516,12 @@ func TestStickyLoadBalanaceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
endpointsv3 := &api.Endpoints{
endpointsv3 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}},
},
},
}
@@ -546,13 +546,13 @@ func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
t.Errorf("Didn't fail with non-existent service")
}
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpointsv1 := &api.Endpoints{
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
endpointsv1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
},
},
}
@@ -567,12 +567,12 @@ func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
// Then update the configuration with one fewer endpoints, make sure
// we start in the beginning again
endpointsv2 := &api.Endpoints{
endpointsv2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 4}, {Port: 5}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 4}, {Port: 5}},
},
},
}
@@ -586,7 +586,7 @@ func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
// Clear endpoints
endpointsv3 := &api.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil}
endpointsv3 := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil}
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
endpoint, err = loadBalancer.NextEndpoint(service, nil, false)
@@ -605,24 +605,24 @@ func TestStickyLoadBalanceWorksWithServiceRemoval(t *testing.T) {
if err == nil || len(endpoint) != 0 {
t.Errorf("Didn't fail with non-existent service")
}
loadBalancer.NewService(fooService, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints1 := &api.Endpoints{
loadBalancer.NewService(fooService, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
endpoints1 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: fooService.Name, Namespace: fooService.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
},
},
}
barService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: ""}
loadBalancer.NewService(barService, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints2 := &api.Endpoints{
loadBalancer.NewService(barService, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
endpoints2 := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: barService.Name, Namespace: barService.Namespace},
Subsets: []api.EndpointSubset{
Subsets: []v1.EndpointSubset{
{
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
Ports: []api.EndpointPort{{Port: 4}, {Port: 5}},
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
Ports: []v1.EndpointPort{{Port: 4}, {Port: 5}},
},
},
}
@@ -674,13 +674,13 @@ func TestStickyLoadBalanceWorksWithEndpointFails(t *testing.T) {
}
// Call NewService() before OnEndpointsUpdate()
loadBalancer.NewService(service, api.ServiceAffinityClientIP, int(api.DefaultClientIPServiceAffinitySeconds))
endpoints := &api.Endpoints{
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
endpoints := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
Subsets: []api.EndpointSubset{
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
Subsets: []v1.EndpointSubset{
{Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
{Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
{Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, Ports: []v1.EndpointPort{{Port: 3}}},
},
}
loadBalancer.OnEndpointsAdd(endpoints)
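The assertions in these tests go through an expectEndpoint helper whose body falls outside the hunks shown. Reconstructed from its call sites, and written as if inside this test file (which already imports testing, net, and the proxy package), it presumably looks close to this sketch rather than being the verbatim upstream helper:

// expectEndpoint asks the balancer for the next endpoint and fails the test
// if the lookup errors or returns something other than the expected pick.
func expectEndpoint(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
    endpoint, err := loadBalancer.NextEndpoint(service, netaddr, false)
    if err != nil {
        t.Errorf("didn't find a service for %s: %v", service, err)
    }
    if endpoint != expected {
        t.Errorf("expected endpoint %s for service %s (client %v), got %s", expected, service, netaddr, endpoint)
    }
}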