Fresh dep ensure

This commit is contained in:
Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions

View File

@ -14,7 +14,6 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/ipvs/testing:go_default_library",
"//pkg/proxy/util:go_default_library",
@ -25,10 +24,11 @@ go_test(
"//pkg/util/iptables/testing:go_default_library",
"//pkg/util/ipvs:go_default_library",
"//pkg/util/ipvs/testing:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],
@ -37,48 +37,15 @@ go_test(
go_library(
name = "go_default_library",
srcs = [
"graceful_termination.go",
"ipset.go",
"netlink.go",
"netlink_linux.go",
"netlink_unsupported.go",
"proxier.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"netlink_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"netlink_unsupported.go",
],
"//conditions:default": [],
}),
],
importpath = "k8s.io/kubernetes/pkg/proxy/ipvs",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/healthcheck:go_default_library",
"//pkg/proxy/metrics:go_default_library",
@ -90,13 +57,13 @@ go_library(
"//pkg/util/ipvs:go_default_library",
"//pkg/util/net:go_default_library",
"//pkg/util/sysctl:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:linux": [

View File

@ -2,6 +2,7 @@ reviewers:
- thockin
- brendandburns
- m1093782566
- Lion-Wei
approvers:
- thockin
- brendandburns

View File

@ -27,180 +27,239 @@ IPVS runs on a host and acts as a load balancer in front of a cluster of real se
and UDP-based services to the real servers, and make services of real servers appear as virtual services on a single IP address.
## IPVS vs. IPTABLES
IPVS mode was introduced in Kubernetes v1.8 and goes beta in v1.9. IPTABLES mode was added in v1.1 and become the default operating mode since v1.2. Both IPVS and IPTABLES are based on `netfilter`.
IPVS mode was introduced in Kubernetes v1.8, went beta in v1.9, and GA in v1.11. IPTABLES mode was added in v1.1 and became the default operating mode in v1.2. Both IPVS and IPTABLES are based on `netfilter`.
Differences between IPVS mode and IPTABLES mode are as follows:
1. IPVS provides better scalability and performance for large clusters.
2. IPVS supports more sophisticated load balancing algorithms than iptables (least load, least connections, locality, weighted, etc.); an illustrative flag example follows this list.
3. IPVS supports server health checking and connection retries, etc.
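For example (a minimal sketch; check the flag names against your kube-proxy version), the scheduling algorithm can be selected with the `--ipvs-scheduler` flag when running in ipvs mode:
```shell
# Illustrative only: run kube-proxy in ipvs mode with the least-connection scheduler.
kube-proxy --proxy-mode=ipvs --ipvs-scheduler=lc
```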
### When ipvs falls back to iptables
IPVS proxier will employ iptables in doing packet filtering, SNAT and supporting NodePort type service. Specifically, ipvs proxier will fall back on iptables in the following 4 scenarios.
IPVS proxier will employ iptables for packet filtering, SNAT or masquerade.
Specifically, the ipvs proxier will use ipset to store the source or destination addresses of traffic that needs to be dropped or masqueraded, to make sure the number of iptables rules stays constant no matter how many services we have.
Here is the table of ipset sets that the ipvs proxier uses (a brief inspection example follows the table).
| set name | members | usage |
| :----------------------------- | ---------------------------------------- | ---------------------------------------- |
| KUBE-CLUSTER-IP | All service IP + port | Mark-Masq for cases where `masquerade-all=true` or `clusterCIDR` is specified |
| KUBE-LOOP-BACK | All service IP + port + IP | masquerade for resolving hairpin issues |
| KUBE-EXTERNAL-IP | service external IP + port | masquerade for packets to external IPs |
| KUBE-LOAD-BALANCER | load balancer ingress IP + port | masquerade for packets to load balancer type services |
| KUBE-LOAD-BALANCER-LOCAL | LB ingress IP + port with `externalTrafficPolicy=local` | accept packets to load balancers with `externalTrafficPolicy=local` |
| KUBE-LOAD-BALANCER-FW | load balancer ingress IP + port with `loadBalancerSourceRanges` | packet filter for load balancers with `loadBalancerSourceRanges` specified |
| KUBE-LOAD-BALANCER-SOURCE-CIDR | load balancer ingress IP + port + source CIDR | packet filter for load balancers with `loadBalancerSourceRanges` specified |
| KUBE-NODE-PORT-TCP | nodeport type service TCP port | masquerade for packets to nodePort(TCP) |
| KUBE-NODE-PORT-LOCAL-TCP | nodeport type service TCP port with `externalTrafficPolicy=local` | accept packets to nodeport services with `externalTrafficPolicy=local` |
| KUBE-NODE-PORT-UDP | nodeport type service UDP port | masquerade for packets to nodePort(UDP) |
| KUBE-NODE-PORT-LOCAL-UDP | nodeport type service UDP port with `externalTrafficPolicy=local` | accept packets to nodeport services with `externalTrafficPolicy=local` |
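As a quick sanity check (assuming the `ipset` tool is installed on the node), these sets can be inspected directly:
```shell
# Illustrative only: list the members of two of the sets maintained by the ipvs proxier.
ipset list KUBE-CLUSTER-IP
ipset list KUBE-LOOP-BACK
```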
The IPVS proxier falls back on iptables in the following scenarios.
**1. kube-proxy starts with --masquerade-all=true**
If kube-proxy starts with `--masquerade-all=true`, ipvs proxier will masquerade all traffic accessing service Cluster IP, which behaves the same as what iptables proxier. Suppose there is a service with Cluster IP `10.244.5.1` and port `8080`, then the iptables installed by ipvs proxier should be like what is shown below.
If kube-proxy starts with `--masquerade-all=true`, the ipvs proxier will masquerade all traffic accessing a service Cluster IP, behaving the same as the iptables proxier. Suppose kube-proxy has the flag `--masquerade-all=true` specified; the iptables rules installed by the ipvs proxier should then look like what is shown below.
```shell
# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
Chain KUBE-MARK-DROP (0 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x8000
Chain KUBE-MARK-MASQ (6 references)
target prot opt source destination
Chain KUBE-MARK-MASQ (2 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOOP-BACK dst,dst,src
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-MARK-MASQ tcp -- 0.0.0.0/0 10.244.5.1 /* default/foo:http cluster IP */ tcp dpt:8080
target prot opt source destination
KUBE-MARK-MASQ all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-CLUSTER-IP dst,dst
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-CLUSTER-IP dst,dst
```
**2. Specify cluster CIDR in kube-proxy startup**
If kube-proxy starts with `--cluster-cidr=<cidr>`, ipvs proxier will masquerade off-cluster traffic accessing service Cluster IP, which behaves the same as what iptables proxier. Suppose kube-proxy is provided with the cluster cidr `10.244.16.0/24`, and service Cluster IP is `10.244.5.1` and port is `8080`, then the iptables installed by ipvs proxier should be like what is shown below.
If kube-proxy starts with `--cluster-cidr=<cidr>`, the ipvs proxier will masquerade off-cluster traffic accessing a service Cluster IP, behaving the same as the iptables proxier. Suppose kube-proxy is provided with the cluster cidr `10.244.16.0/24`; the iptables rules installed by the ipvs proxier should then look like what is shown below.
```shell
# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
Chain KUBE-MARK-DROP (0 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x8000
Chain KUBE-MARK-MASQ (6 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-MARK-MASQ tcp -- !10.244.16.0/24 10.244.5.1 /* default/foo:http cluster IP */ tcp dpt:8080
```
**3. Load Balancer Source Ranges is specified for LB type service**
When service's `LoadBalancerStatus.ingress.IP` is not empty and service's `LoadBalancerSourceRanges` is specified, ipvs proxier will install iptables which looks like what is shown below.
Suppose service's `LoadBalancerStatus.ingress.IP` is `10.96.1.2` and service's `LoadBalancerSourceRanges` is `10.120.2.0/24`.
```shell
# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
Chain KUBE-MARK-DROP (0 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x8000
Chain KUBE-MARK-MASQ (6 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-SERVICES (2 references)
target prot opt source destination
ACCEPT tcp -- 10.120.2.0/24 10.96.1.2 /* default/foo:http loadbalancer IP */ tcp dpt:8080
DROP tcp -- 0.0.0.0/0 10.96.1.2 /* default/foo:http loadbalancer IP */ tcp dpt:8080
```
**4. Support NodePort type service**
For supporting NodePort type service, ipvs will recruit the existing implementation in iptables proxier. For example,
```shell
# kubectl describe svc nginx-service
Name: nginx-service
...
Type: NodePort
IP: 10.101.28.148
Port: http 3080/TCP
NodePort: http 31604/TCP
Endpoints: 172.17.0.2:80
Session Affinity: None
# iptables -t nat -nL
[root@100-106-179-225 ~]# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-MARK-MASQ tcp -- !172.16.0.0/16 10.101.28.148 /* default/nginx-service:http cluster IP */ tcp dpt:3080
KUBE-SVC-6IM33IEVEEV7U3GP tcp -- 0.0.0.0/0 10.101.28.148 /* default/nginx-service:http cluster IP */ tcp dpt:3080
KUBE-NODEPORTS all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL
Chain KUBE-NODEPORTS (1 references)
target prot opt source destination
KUBE-MARK-MASQ tcp -- 0.0.0.0/0 0.0.0.0/0 /* default/nginx-service:http */ tcp dpt:31604
KUBE-SVC-6IM33IEVEEV7U3GP tcp -- 0.0.0.0/0 0.0.0.0/0 /* default/nginx-service:http */ tcp dpt:31604
Chain KUBE-SVC-6IM33IEVEEV7U3GP (2 references)
target prot opt source destination
KUBE-SEP-Q3UCPZ54E6Q2R4UT all -- 0.0.0.0/0 0.0.0.0/0 /* default/nginx-service:http */
Chain KUBE-SEP-Q3UCPZ54E6Q2R4UT (1 references)
target prot opt source destination
KUBE-MARK-MASQ all -- 172.17.0.2 0.0.0.0/0 /* default/nginx-service:http */
DNAT tcp -- 0.0.0.0/0 0.0.0.0/0 /* default/nginx-service:http */ tcp to:172.17.0.2:80
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-MARK-MASQ (3 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOOP-BACK dst,dst,src
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-MARK-MASQ all -- !10.244.16.0/24 0.0.0.0/0 match-set KUBE-CLUSTER-IP dst,dst
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-CLUSTER-IP dst,dst
```
**3. Load Balancer type service**
For a loadBalancer type service, the ipvs proxier will install iptables rules that match the ipset `KUBE-LOAD-BALANCER`.
Specifically, when the service's `LoadBalancerSourceRanges` is specified or `externalTrafficPolicy=local` is set,
the ipvs proxier will create the ipset sets `KUBE-LOAD-BALANCER-LOCAL`/`KUBE-LOAD-BALANCER-FW`/`KUBE-LOAD-BALANCER-SOURCE-CIDR`
and install iptables rules accordingly, which should look like what is shown below.
```shell
# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-FIREWALL (1 references)
target prot opt source destination
RETURN all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOAD-BALANCER-SOURCE-CIDR dst,dst,src
KUBE-MARK-DROP all -- 0.0.0.0/0 0.0.0.0/0
Chain KUBE-LOAD-BALANCER (1 references)
target prot opt source destination
KUBE-FIREWALL all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOAD-BALANCER-FW dst,dst
RETURN all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOAD-BALANCER-LOCAL dst,dst
KUBE-MARK-MASQ all -- 0.0.0.0/0 0.0.0.0/0
Chain KUBE-MARK-DROP (1 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x8000
Chain KUBE-MARK-MASQ (2 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOOP-BACK dst,dst,src
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-LOAD-BALANCER all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOAD-BALANCER dst,dst
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOAD-BALANCER dst,dst
```
**4. NodePort type service**
For a NodePort type service, the ipvs proxier will install iptables rules that match the ipsets `KUBE-NODE-PORT-TCP`/`KUBE-NODE-PORT-UDP`.
When `externalTrafficPolicy=local` is specified, the ipvs proxier will create the ipset sets `KUBE-NODE-PORT-LOCAL-TCP`/`KUBE-NODE-PORT-LOCAL-UDP`
and install iptables rules accordingly, which should look like what is shown below.
Suppose a service with a TCP type nodePort.
```shell
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-MARK-MASQ (2 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-NODE-PORT (1 references)
target prot opt source destination
RETURN all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-NODE-PORT-LOCAL-TCP dst
KUBE-MARK-MASQ all -- 0.0.0.0/0 0.0.0.0/0
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOOP-BACK dst,dst,src
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-NODE-PORT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-NODE-PORT-TCP dst
```
**5. Service with externalIPs specified**
For a service with `externalIPs` specified, the ipvs proxier will install iptables rules that match the ipset `KUBE-EXTERNAL-IP`.
Suppose we have a service with `externalIPs` specified; the iptables rules should look like what is shown below.
```shell
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-MARK-MASQ (2 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOOP-BACK dst,dst,src
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-MARK-MASQ all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-EXTERNAL-IP dst,dst
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-EXTERNAL-IP dst,dst PHYSDEV match ! --physdev-is-in ADDRTYPE match src-type !LOCAL
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-EXTERNAL-IP dst,dst ADDRTYPE match dst-type LOCAL
```
## Run kube-proxy in ipvs mode
Currently, local-up scripts, GCE scripts and kubeadm support switching IPVS proxy mode via exporting environment variables or specifying flags.
### Prerequisite
Ensure IPVS required kernel modules
Ensure the kernel modules required by IPVS are loaded (**Notes**: use `nf_conntrack` instead of `nf_conntrack_ipv4` for Linux kernel 4.19 and later); a loading sketch follows the module list below.
```shell
ip_vs
ip_vs_rr
@ -248,13 +307,13 @@ lsmod | grep -e ipvs -e nf_conntrack_ipv4
cut -f1 -d " " /proc/modules | grep -e ip_vs -e nf_conntrack_ipv4
```
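A minimal sketch for loading the modules listed above ahead of time (substitute `nf_conntrack` for `nf_conntrack_ipv4` on kernel 4.19 and later, as noted):
```shell
# Illustrative only: pre-load the IPVS kernel modules before starting kube-proxy.
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- nf_conntrack_ipv4
```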
Packages such as `ipset` should also be installed on the node before using IPVS mode.
Kube-proxy will fall back to IPTABLES mode if those requirements are not met.
### Local UP Cluster
Kube-proxy will run in iptables mode by default in a [local-up cluster](https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md).
To use IPVS mode, users should export the env `KUBE_PROXY_MODE=ipvs` to specify the ipvs mode before [starting the cluster](https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md#starting-the-cluster):
```shell
@ -266,7 +325,7 @@ export KUBE_PROXY_MODE=ipvs
Similar to a local-up cluster, kube-proxy in [clusters running on GCE](https://kubernetes.io/docs/getting-started-guides/gce/) runs in iptables mode by default. Users need to export the env `KUBE_PROXY_MODE=ipvs` before [starting a cluster](https://kubernetes.io/docs/getting-started-guides/gce/#starting-a-cluster):
```shell
#before running one of the commmands chosen to start a cluster:
#before running one of the commands chosen to start a cluster:
# curl -sS https://get.k8s.io | bash
# wget -q -O - https://get.k8s.io | bash
# cluster/kube-up.sh
@ -275,20 +334,19 @@ export KUBE_PROXY_MODE=ipvs
### Cluster Created by Kubeadm
Kube-proxy will run in iptables mode by default in a cluster deployed by [kubeadm](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/).
If you are using kubeadm with a [configuration file](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file), you can specify the ipvs mode adding `SupportIPVSProxyMode: true` below the `kubeProxy` field.
If you are using kubeadm with a [configuration file](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file), you have to add `mode: ipvs` and also add `SupportIPVSProxyMode: true` below the `kubeProxy` field as part of the kubeadm configuration.
```yaml
kind: MasterConfiguration
apiVersion: kubeadm.k8s.io/v1alpha1
...
kubeProxy:
config:
featureGates: SupportIPVSProxyMode=true
featureGates:
SupportIPVSProxyMode: true
mode: ipvs
...
```
Note that in Kubernetes 1.11 and later, `SupportIPVSProxyMode` is set to `true` by default.
before running
`kubeadm init --config <path_to_configuration_file>`
@ -301,7 +359,7 @@ kubeadm init --feature-gates=SupportIPVSProxyMode=true
to specify the ipvs mode before deploying the cluster.
**Notes**
If ipvs mode is successfully enabled, you should see IPVS proxy rules (use `ipvsadm`) like
```shell
# ipvsadm -ln
@ -316,7 +374,7 @@ or similar logs occur in kube-proxy logs (for example, `/tmp/kube-proxy.log` for
Using ipvs Proxier.
```
If there are no ipvs proxy rules, or logs like the following occur, kube-proxy has failed to use ipvs mode:
```
Can't use ipvs proxier, trying iptables proxier
Using iptables Proxier.
@ -352,7 +410,7 @@ UDP 10.0.0.10:53 rr
### Why kube-proxy can't start IPVS mode
Use the following checklist to help you solve the problems:
**1. Enable the IPVS feature gate**

View File

@ -0,0 +1,220 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipvs
import (
"sync"
"time"
"fmt"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog"
utilipvs "k8s.io/kubernetes/pkg/util/ipvs"
)
const (
rsGracefulDeletePeriod = 15 * time.Minute
rsCheckDeleteInterval = 1 * time.Minute
)
// listItem stores real server information and the process time.
// If nothing special happens, the real server will be deleted after the process time.
type listItem struct {
VirtualServer *utilipvs.VirtualServer
RealServer *utilipvs.RealServer
}
// String returns the unique real server name (with virtual server information)
func (g *listItem) String() string {
return GetUniqueRSName(g.VirtualServer, g.RealServer)
}
// GetUniqueRSName returns a unique rs name built from the vs and rs information
func GetUniqueRSName(vs *utilipvs.VirtualServer, rs *utilipvs.RealServer) string {
return vs.String() + "/" + rs.String()
}
type graceTerminateRSList struct {
lock sync.Mutex
list map[string]*listItem
}
// add pushes a new element onto the rsList
func (q *graceTerminateRSList) add(rs *listItem) bool {
q.lock.Lock()
defer q.lock.Unlock()
uniqueRS := rs.String()
if _, ok := q.list[uniqueRS]; ok {
return false
}
klog.V(5).Infof("Adding rs %v to graceful delete rsList", rs)
q.list[uniqueRS] = rs
return true
}
// remove removes an element from the rsList
func (q *graceTerminateRSList) remove(rs *listItem) bool {
q.lock.Lock()
defer q.lock.Unlock()
uniqueRS := rs.String()
if _, ok := q.list[uniqueRS]; !ok {
return false
}
delete(q.list, uniqueRS)
return true
}
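// flushList calls handler for every real server in the list and removes the entries that handler reports as deleted.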
func (q *graceTerminateRSList) flushList(handler func(rsToDelete *listItem) (bool, error)) bool {
success := true
for name, rs := range q.list {
deleted, err := handler(rs)
if err != nil {
klog.Errorf("Try delete rs %q err: %v", name, err)
success = false
}
if deleted {
klog.Infof("lw: remote out of the list: %s", name)
q.remove(rs)
}
}
return success
}
// exist checks whether the specified unique RS is in the rsList
func (q *graceTerminateRSList) exist(uniqueRS string) (*listItem, bool) {
q.lock.Lock()
defer q.lock.Unlock()
if rs, ok := q.list[uniqueRS]; ok {
return rs, true
}
return nil, false
}
// GracefulTerminationManager manages rs graceful termination information and does the graceful termination work.
// rsList is the list of rs pending graceful termination; ipvs is the ipvs interface used to do the ipvs delete/update work
type GracefulTerminationManager struct {
rsList graceTerminateRSList
ipvs utilipvs.Interface
}
// NewGracefulTerminationManager creates a GracefulTerminationManager to manage ipvs rs graceful termination work
func NewGracefulTerminationManager(ipvs utilipvs.Interface) *GracefulTerminationManager {
l := make(map[string]*listItem)
return &GracefulTerminationManager{
rsList: graceTerminateRSList{
list: l,
},
ipvs: ipvs,
}
}
// InTerminationList checks whether the specified unique rs name is in the graceful termination list
func (m *GracefulTerminationManager) InTerminationList(uniqueRS string) bool {
_, exist := m.rsList.exist(uniqueRS)
return exist
}
// GracefulDeleteRS updates the rs weight to 0 and adds the rs to the graceful termination list
func (m *GracefulTerminationManager) GracefulDeleteRS(vs *utilipvs.VirtualServer, rs *utilipvs.RealServer) error {
// Try to delete the rs before adding it to the graceful delete list
ele := &listItem{
VirtualServer: vs,
RealServer: rs,
}
deleted, err := m.deleteRsFunc(ele)
if err != nil {
klog.Errorf("Delete rs %q err: %v", ele.String(), err)
}
if deleted {
return nil
}
rs.Weight = 0
err = m.ipvs.UpdateRealServer(vs, rs)
if err != nil {
return err
}
klog.V(5).Infof("Adding an element to graceful delete rsList: %+v", ele)
m.rsList.add(ele)
return nil
}
func (m *GracefulTerminationManager) deleteRsFunc(rsToDelete *listItem) (bool, error) {
klog.Infof("Trying to delete rs: %s", rsToDelete.String())
rss, err := m.ipvs.GetRealServers(rsToDelete.VirtualServer)
if err != nil {
return false, err
}
for _, rs := range rss {
if rsToDelete.RealServer.Equal(rs) {
if rs.ActiveConn != 0 {
return false, nil
}
klog.Infof("Deleting rs: %s", rsToDelete.String())
err := m.ipvs.DeleteRealServer(rsToDelete.VirtualServer, rs)
if err != nil {
return false, fmt.Errorf("Delete destination %q err: %v", rs.String(), err)
}
return true, nil
}
}
return true, fmt.Errorf("Failed to delete rs %q, can't find the real server", rsToDelete.String())
}
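// tryDeleteRs flushes the graceful termination list, deleting real servers whose active connections have drained.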
func (m *GracefulTerminationManager) tryDeleteRs() {
if !m.rsList.flushList(m.deleteRsFunc) {
klog.Errorf("Try flush graceful termination list err")
}
}
// MoveRSOutofGracefulDeleteList deletes an rs and removes it from the rsList immediately
func (m *GracefulTerminationManager) MoveRSOutofGracefulDeleteList(uniqueRS string) error {
rsToDelete, find := m.rsList.exist(uniqueRS)
if !find || rsToDelete == nil {
return fmt.Errorf("failed to find rs: %q", uniqueRS)
}
err := m.ipvs.DeleteRealServer(rsToDelete.VirtualServer, rsToDelete.RealServer)
if err != nil {
return err
}
m.rsList.remove(rsToDelete)
return nil
}
// Run starts a goroutine that tries to delete the rs entries in the graceful delete rsList at a 1 minute interval
func (m *GracefulTerminationManager) Run() {
// before starting, add any leftover rs entries to the graceful delete rsList
vss, err := m.ipvs.GetVirtualServers()
if err != nil {
klog.Errorf("IPVS graceful delete manager failed to get IPVS virtualserver")
}
for _, vs := range vss {
rss, err := m.ipvs.GetRealServers(vs)
if err != nil {
klog.Errorf("IPVS graceful delete manager failed to get %v realserver", vs)
continue
}
for _, rs := range rss {
m.GracefulDeleteRS(vs, rs)
}
}
go wait.Until(m.tryDeleteRs, rsCheckDeleteInterval, wait.NeverStop)
}

View File

@ -18,11 +18,11 @@ package ipvs
import (
"k8s.io/apimachinery/pkg/util/sets"
utilversion "k8s.io/apimachinery/pkg/util/version"
utilipset "k8s.io/kubernetes/pkg/util/ipset"
utilversion "k8s.io/kubernetes/pkg/util/version"
"fmt"
"github.com/golang/glog"
"k8s.io/klog"
)
const (
@ -64,6 +64,12 @@ const (
kubeNodePortLocalSetUDPComment = "Kubernetes nodeport UDP port with externalTrafficPolicy=local"
kubeNodePortLocalSetUDP = "KUBE-NODE-PORT-LOCAL-UDP"
kubeNodePortSetSCTPComment = "Kubernetes nodeport SCTP port for masquerade purpose"
kubeNodePortSetSCTP = "KUBE-NODE-PORT-SCTP"
kubeNodePortLocalSetSCTPComment = "Kubernetes nodeport SCTP port with externalTrafficPolicy=local"
kubeNodePortLocalSetSCTP = "KUBE-NODE-PORT-LOCAL-SCTP"
)
// IPSetVersioner can query the current ipset version.
@ -119,7 +125,7 @@ func (set *IPSet) resetEntries() {
func (set *IPSet) syncIPSetEntries() {
appliedEntries, err := set.handle.ListEntries(set.Name)
if err != nil {
glog.Errorf("Failed to list ip set entries, error: %v", err)
klog.Errorf("Failed to list ip set entries, error: %v", err)
return
}
@ -134,18 +140,18 @@ func (set *IPSet) syncIPSetEntries() {
for _, entry := range currentIPSetEntries.Difference(set.activeEntries).List() {
if err := set.handle.DelEntry(entry, set.Name); err != nil {
if !utilipset.IsNotFoundError(err) {
glog.Errorf("Failed to delete ip set entry: %s from ip set: %s, error: %v", entry, set.Name, err)
klog.Errorf("Failed to delete ip set entry: %s from ip set: %s, error: %v", entry, set.Name, err)
}
} else {
glog.V(3).Infof("Successfully delete legacy ip set entry: %s from ip set: %s", entry, set.Name)
klog.V(3).Infof("Successfully delete legacy ip set entry: %s from ip set: %s", entry, set.Name)
}
}
// Create active entries
for _, entry := range set.activeEntries.Difference(currentIPSetEntries).List() {
if err := set.handle.AddEntry(entry, &set.IPSet, true); err != nil {
glog.Errorf("Failed to add entry: %v to ip set: %s, error: %v", entry, set.Name, err)
klog.Errorf("Failed to add entry: %v to ip set: %s, error: %v", entry, set.Name, err)
} else {
glog.V(3).Infof("Successfully add entry: %v to ip set: %s", entry, set.Name)
klog.V(3).Infof("Successfully add entry: %v to ip set: %s", entry, set.Name)
}
}
}
@ -153,7 +159,7 @@ func (set *IPSet) syncIPSetEntries() {
func ensureIPSet(set *IPSet) error {
if err := set.handle.CreateSet(&set.IPSet, true); err != nil {
glog.Errorf("Failed to make sure ip set: %v exist, error: %v", set, err)
klog.Errorf("Failed to make sure ip set: %v exist, error: %v", set, err)
return err
}
return nil
@ -163,13 +169,13 @@ func ensureIPSet(set *IPSet) error {
func checkMinVersion(vstring string) bool {
version, err := utilversion.ParseGeneric(vstring)
if err != nil {
glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err)
klog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err)
return false
}
minVersion, err := utilversion.ParseGeneric(MinIPSetCheckVersion)
if err != nil {
glog.Errorf("MinCheckVersion (%s) is not a valid version string: %v", MinIPSetCheckVersion, err)
klog.Errorf("MinCheckVersion (%s) is not a valid version string: %v", MinIPSetCheckVersion, err)
return false
}
return !version.LessThan(minVersion)

View File

@ -179,6 +179,26 @@ func TestSyncIPSetEntries(t *testing.T) {
currentEntries: []string{"80", "9090", "8081", "8082"},
expectedEntries: []string{"8080"},
},
{ // case 12
set: &utilipset.IPSet{
Name: "sctp-1",
},
setType: utilipset.HashIPPort,
ipv6: false,
activeEntries: []string{"172.17.0.4,sctp:80"},
currentEntries: nil,
expectedEntries: []string{"172.17.0.4,sctp:80"},
},
{ // case 13
set: &utilipset.IPSet{
Name: "sctp-2",
},
setType: utilipset.HashIPPort,
ipv6: true,
activeEntries: []string{"FE80::0202:B3FF:FE1E:8329,sctp:80"},
currentEntries: []string{"FE80::0202:B3FF:FE1E:8329,sctp:80"},
expectedEntries: []string{"FE80::0202:B3FF:FE1E:8329,sctp:80"},
},
}
for i := range testCases {

View File

@ -30,7 +30,9 @@ type NetLinkHandle interface {
EnsureDummyDevice(devName string) (exist bool, err error)
// DeleteDummyDevice deletes the given dummy device by name.
DeleteDummyDevice(devName string) error
// GetLocalAddresses returns all unique local type IP addresses based on filter device interface. If filter device is not given,
// it will list all unique local type addresses.
GetLocalAddresses(filterDev string) (sets.String, error)
// ListBindAddress will list all IP addresses which are bound in a given interface
ListBindAddress(devName string) ([]string, error)
// GetLocalAddresses returns all unique local type IP addresses based on the specified device and filter device.
// If dev is not specified, it will list all unique local type addresses except those on the filter device.
GetLocalAddresses(dev, filterDev string) (sets.String, error)
}

View File

@ -105,8 +105,25 @@ func (h *netlinkHandle) DeleteDummyDevice(devName string) error {
return h.LinkDel(dummy)
}
// ListBindAddress will list all IP addresses which are bound in a given interface
func (h *netlinkHandle) ListBindAddress(devName string) ([]string, error) {
dev, err := h.LinkByName(devName)
if err != nil {
return nil, fmt.Errorf("error get interface: %s, err: %v", devName, err)
}
addrs, err := h.AddrList(dev, 0)
if err != nil {
return nil, fmt.Errorf("error list bound address of interface: %s, err: %v", devName, err)
}
var ips []string
for _, addr := range addrs {
ips = append(ips, addr.IP.String())
}
return ips, nil
}
// GetLocalAddresses lists all LOCAL type IP addresses from host based on filter device.
// If filter device is not specified, it's equivalent to exec:
// If dev is not specified, it's equivalent to exec:
// $ ip route show table local type local proto kernel
// 10.0.0.1 dev kube-ipvs0 scope host src 10.0.0.1
// 10.0.0.10 dev kube-ipvs0 scope host src 10.0.0.10
@ -119,20 +136,28 @@ func (h *netlinkHandle) DeleteDummyDevice(devName string) error {
// Then cut the unique src IP fields,
// --> result set: [10.0.0.1, 10.0.0.10, 10.0.0.252, 100.106.89.164, 127.0.0.1, 192.168.122.1]
// If filter device is specified, it's equivalent to exec:
// If dev is specified, it's equivalent to exec:
// $ ip route show table local type local proto kernel dev kube-ipvs0
// 10.0.0.1 scope host src 10.0.0.1
// 10.0.0.10 scope host src 10.0.0.10
// Then cut the unique src IP fields,
// --> result set: [10.0.0.1, 10.0.0.10]
func (h *netlinkHandle) GetLocalAddresses(filterDev string) (sets.String, error) {
linkIndex := -1
if len(filterDev) != 0 {
// If filterDev is specified, the result will discard routes of the filter device and cut src from the other routes.
func (h *netlinkHandle) GetLocalAddresses(dev, filterDev string) (sets.String, error) {
chosenLinkIndex, filterLinkIndex := -1, -1
if dev != "" {
link, err := h.LinkByName(dev)
if err != nil {
return nil, fmt.Errorf("error get device %s, err: %v", filterDev, err)
}
chosenLinkIndex = link.Attrs().Index
} else if filterDev != "" {
link, err := h.LinkByName(filterDev)
if err != nil {
return nil, fmt.Errorf("error get filter device %s, err: %v", filterDev, err)
}
linkIndex = link.Attrs().Index
filterLinkIndex = link.Attrs().Index
}
routeFilter := &netlink.Route{
@ -142,18 +167,20 @@ func (h *netlinkHandle) GetLocalAddresses(filterDev string) (sets.String, error)
}
filterMask := netlink.RT_FILTER_TABLE | netlink.RT_FILTER_TYPE | netlink.RT_FILTER_PROTOCOL
// find filter device
if linkIndex != -1 {
routeFilter.LinkIndex = linkIndex
// find chosen device
if chosenLinkIndex != -1 {
routeFilter.LinkIndex = chosenLinkIndex
filterMask |= netlink.RT_FILTER_OIF
}
routes, err := h.RouteListFiltered(netlink.FAMILY_ALL, routeFilter, filterMask)
if err != nil {
return nil, fmt.Errorf("error list route table, err: %v", err)
}
res := sets.NewString()
for _, route := range routes {
if route.LinkIndex == filterLinkIndex {
continue
}
if route.Src != nil {
res.Insert(route.Src.String())
}

View File

@ -52,7 +52,12 @@ func (h *emptyHandle) DeleteDummyDevice(devName string) error {
return fmt.Errorf("netlink is not supported in this platform")
}
// GetLocalAddresses is part of interface.
func (h *emptyHandle) GetLocalAddresses(filterDev string) (sets.String, error) {
// ListBindAddress is part of interface.
func (h *emptyHandle) ListBindAddress(devName string) ([]string, error) {
return nil, fmt.Errorf("netlink is not supported in this platform")
}
// GetLocalAddresses is part of interface.
func (h *emptyHandle) GetLocalAddresses(dev, filterDev string) (sets.String, error) {
return nil, fmt.Errorf("netlink is not supported in this platform")
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -18,7 +18,7 @@ go_library(
tags = ["automanaged"],
deps = [
"//pkg/util/ipset:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)
@ -39,5 +39,5 @@ go_test(
name = "go_default_test",
srcs = ["fake_test.go"],
embed = [":go_default_library"],
deps = ["//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library"],
deps = ["//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library"],
)

View File

@ -39,36 +39,93 @@ func NewFakeNetlinkHandle() *FakeNetlinkHandle {
// EnsureAddressBind is a mock implementation
func (h *FakeNetlinkHandle) EnsureAddressBind(address, devName string) (exist bool, err error) {
if len(devName) == 0 {
return false, fmt.Errorf("Device name can't be empty")
}
if _, ok := h.localAddresses[devName]; !ok {
return false, fmt.Errorf("Error bind address: %s to a non-exist interface: %s", address, devName)
}
for _, addr := range h.localAddresses[devName] {
if addr == address {
// return true if the address is already bound to device
return true, nil
}
}
h.localAddresses[devName] = append(h.localAddresses[devName], address)
return false, nil
}
// UnbindAddress is a mock implementation
func (h *FakeNetlinkHandle) UnbindAddress(address, devName string) error {
return nil
if len(devName) == 0 {
return fmt.Errorf("Device name can't be empty")
}
if _, ok := h.localAddresses[devName]; !ok {
return fmt.Errorf("Error unbind address: %s from a non-exist interface: %s", address, devName)
}
for i, addr := range h.localAddresses[devName] {
if addr == address {
// delete address from slice h.localAddresses[devName]
h.localAddresses[devName] = append(h.localAddresses[devName][:i], h.localAddresses[devName][i+1:]...)
return nil
}
}
// return error message if address is not found in slice h.localAddresses[devName]
return fmt.Errorf("Address: %s is not found in interface: %s", address, devName)
}
// EnsureDummyDevice is a mock implementation
func (h *FakeNetlinkHandle) EnsureDummyDevice(devName string) (bool, error) {
return false, nil
if len(devName) == 0 {
return false, fmt.Errorf("Device name can't be empty")
}
if _, ok := h.localAddresses[devName]; !ok {
// create dummy interface if devName is not found in localAddress map
h.localAddresses[devName] = make([]string, 0)
return false, nil
}
// return true if devName is already created in localAddress map
return true, nil
}
// DeleteDummyDevice is a mock implementation
func (h *FakeNetlinkHandle) DeleteDummyDevice(devName string) error {
if len(devName) == 0 {
return fmt.Errorf("Device name can't be empty")
}
if _, ok := h.localAddresses[devName]; !ok {
return fmt.Errorf("Error deleting a non-exist interface: %s", devName)
}
delete(h.localAddresses, devName)
return nil
}
// ListBindAddress is a mock implementation
func (h *FakeNetlinkHandle) ListBindAddress(devName string) ([]string, error) {
if len(devName) == 0 {
return nil, fmt.Errorf("Device name can't be empty")
}
if _, ok := h.localAddresses[devName]; !ok {
return nil, fmt.Errorf("Error list addresses from a non-exist interface: %s", devName)
}
return h.localAddresses[devName], nil
}
// GetLocalAddresses is a mock implementation
func (h *FakeNetlinkHandle) GetLocalAddresses(filterDev string) (sets.String, error) {
func (h *FakeNetlinkHandle) GetLocalAddresses(dev, filterDev string) (sets.String, error) {
res := sets.NewString()
if len(filterDev) != 0 {
if len(dev) != 0 {
// list all addresses from a given network interface.
for _, addr := range h.localAddresses[filterDev] {
for _, addr := range h.localAddresses[dev] {
res.Insert(addr)
}
return res, nil
}
// If dev is not given, list all addresses from all available network interfaces, skipping filterDev.
for linkName := range h.localAddresses {
if linkName == filterDev {
continue
}
// list all addresses from a given network interface.
for _, addr := range h.localAddresses[linkName] {
res.Insert(addr)

View File

@ -27,21 +27,21 @@ func TestSetGetLocalAddresses(t *testing.T) {
fake := NewFakeNetlinkHandle()
fake.SetLocalAddresses("eth0", "1.2.3.4")
expected := sets.NewString("1.2.3.4")
addr, _ := fake.GetLocalAddresses("eth0")
addr, _ := fake.GetLocalAddresses("eth0", "")
if !reflect.DeepEqual(expected, addr) {
t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, addr)
}
list, _ := fake.GetLocalAddresses("")
list, _ := fake.GetLocalAddresses("", "")
if !reflect.DeepEqual(expected, list) {
t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, list)
}
fake.SetLocalAddresses("lo", "127.0.0.1")
expected = sets.NewString("127.0.0.1")
addr, _ = fake.GetLocalAddresses("lo")
addr, _ = fake.GetLocalAddresses("lo", "")
if !reflect.DeepEqual(expected, addr) {
t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, addr)
}
list, _ = fake.GetLocalAddresses("")
list, _ = fake.GetLocalAddresses("", "")
expected = sets.NewString("1.2.3.4", "127.0.0.1")
if !reflect.DeepEqual(expected, list) {
t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, list)