vendor updates
vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/BUILD (generated, vendored): 42 changed lines

@@ -10,9 +10,38 @@ go_library(
     name = "go_default_library",
     srcs = [
         "cni.go",
-        "cni_others.go",
     ] + select({
-        "@io_bazel_rules_go//go/platform:windows_amd64": [
+        "@io_bazel_rules_go//go/platform:android": [
+            "cni_others.go",
+        ],
+        "@io_bazel_rules_go//go/platform:darwin": [
+            "cni_others.go",
+        ],
+        "@io_bazel_rules_go//go/platform:dragonfly": [
+            "cni_others.go",
+        ],
+        "@io_bazel_rules_go//go/platform:freebsd": [
+            "cni_others.go",
+        ],
+        "@io_bazel_rules_go//go/platform:linux": [
+            "cni_others.go",
+        ],
+        "@io_bazel_rules_go//go/platform:nacl": [
+            "cni_others.go",
+        ],
+        "@io_bazel_rules_go//go/platform:netbsd": [
+            "cni_others.go",
+        ],
+        "@io_bazel_rules_go//go/platform:openbsd": [
+            "cni_others.go",
+        ],
+        "@io_bazel_rules_go//go/platform:plan9": [
+            "cni_others.go",
+        ],
+        "@io_bazel_rules_go//go/platform:solaris": [
+            "cni_others.go",
+        ],
+        "@io_bazel_rules_go//go/platform:windows": [
             "cni_windows.go",
         ],
         "//conditions:default": [],

@@ -27,7 +56,7 @@ go_library(
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/k8s.io/utils/exec:go_default_library",
     ] + select({
-        "@io_bazel_rules_go//go/platform:windows_amd64": [
+        "@io_bazel_rules_go//go/platform:windows": [
             "//vendor/github.com/containernetworking/cni/pkg/types/020:go_default_library",
         ],
         "//conditions:default": [],

@@ -37,15 +66,14 @@ go_library(
 go_test(
     name = "go_default_test",
     srcs = select({
-        "@io_bazel_rules_go//go/platform:linux_amd64": [
+        "@io_bazel_rules_go//go/platform:linux": [
             "cni_test.go",
         ],
         "//conditions:default": [],
     }),
-    importpath = "k8s.io/kubernetes/pkg/kubelet/network/cni",
-    library = ":go_default_library",
+    embed = [":go_default_library"],
     deps = select({
-        "@io_bazel_rules_go//go/platform:linux_amd64": [
+        "@io_bazel_rules_go//go/platform:linux": [
             "//pkg/kubelet/apis/kubeletconfig:go_default_library",
             "//pkg/kubelet/container:go_default_library",
             "//pkg/kubelet/container/testing:go_default_library",

vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/cni.go (generated, vendored): 4 changed lines

@@ -273,7 +273,9 @@ func (plugin *cniNetworkPlugin) deleteFromNetwork(network *cniNetwork, podName s
 	netConf, cniNet := network.NetworkConfig, network.CNIConfig
 	glog.V(4).Infof("About to del CNI network %v (type=%v)", netConf.Name, netConf.Plugins[0].Network.Type)
 	err = cniNet.DelNetworkList(netConf, rt)
-	if err != nil {
+	// The pod may not get deleted successfully at the first time.
+	// Ignore "no such file or directory" error in case the network has already been deleted in previous attempts.
+	if err != nil && !strings.Contains(err.Error(), "no such file or directory") {
 		glog.Errorf("Error deleting network: %v", err)
 		return err
 	}

vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/BUILD (generated, vendored): 7 changed lines

@@ -8,7 +8,7 @@ go_library(
     deps = [
         "//pkg/apis/core/validation:go_default_library",
         "//pkg/features:go_default_library",
-        "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
+        "//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
         "//pkg/kubelet/container:go_default_library",
         "//pkg/kubelet/util/format:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",

@@ -21,10 +21,9 @@ go_library(
 go_test(
     name = "go_default_test",
     srcs = ["dns_test.go"],
-    importpath = "k8s.io/kubernetes/pkg/kubelet/network/dns",
-    library = ":go_default_library",
+    embed = [":go_default_library"],
     deps = [
-        "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
+        "//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
         "//vendor/github.com/stretchr/testify/require:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",

vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go (generated, vendored): 4 changed lines

@@ -30,7 +30,7 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/kubernetes/pkg/apis/core/validation"
 	"k8s.io/kubernetes/pkg/features"
-	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
+	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 
@@ -321,7 +321,7 @@ func appendDNSConfig(existingDNSConfig *runtimeapi.DNSConfig, dnsConfig *v1.PodD
 	return existingDNSConfig
 }
 
-// GetPodDNS returns DNS setttings for the pod.
+// GetPodDNS returns DNS settings for the pod.
 func (c *Configurer) GetPodDNS(pod *v1.Pod) (*runtimeapi.DNSConfig, error) {
 	dnsConfig, err := c.getHostDNSConfig(pod)
 	if err != nil {

vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns_test.go (generated, vendored): 132 changed lines

@@ -18,7 +18,9 @@ package dns
 
 import (
 	"fmt"
+	"io/ioutil"
 	"net"
+	"os"
 	"strings"
 	"testing"
 
@@ -28,7 +30,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/record"
-	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
+	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

@@ -487,64 +489,134 @@ func TestGetPodDNSCustom(t *testing.T) {
 		UID:       types.UID("testNode"),
 		Namespace: "",
 	}
-	clusterNS := "203.0.113.1"
+
+	testPodNamespace := "testNS"
+	testClusterNameserver := "10.0.0.10"
 	testClusterDNSDomain := "kubernetes.io"
-	testClusterDNS := []net.IP{net.ParseIP(clusterNS)}
-	testOptionValue := "3"
+	testSvcDomain := fmt.Sprintf("svc.%s", testClusterDNSDomain)
+	testNsSvcDomain := fmt.Sprintf("%s.svc.%s", testPodNamespace, testClusterDNSDomain)
+	testNdotsOptionValue := "3"
+	testHostNameserver := "8.8.8.8"
+	testHostDomain := "host.domain"
 
-	configurer := NewConfigurer(recorder, nodeRef, nil, testClusterDNS, testClusterDNSDomain, "")
-
-	pod := &v1.Pod{
+	testPod := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
-			UID:         "",
-			Name:        "test_pod",
-			Namespace:   "testNS",
-			Annotations: map[string]string{},
+			Name:      "test_pod",
+			Namespace: testPodNamespace,
 		},
-		Spec: v1.PodSpec{
-			DNSPolicy: v1.DNSClusterFirst,
-		},
 	}
-	clusterFirstDNSConfig, err := configurer.GetPodDNS(pod)
-	if err != nil {
-		t.Fatalf("Preparing clusterFirstDNSConfig: GetPodDNS(%v), unexpected error: %v", pod, err)
-	}
 
-	// Overwrite DNSPolicy for testing.
-	pod.Spec.DNSPolicy = v1.DNSNone
+	resolvConfContent := []byte(fmt.Sprintf("nameserver %s\nsearch %s\n", testHostNameserver, testHostDomain))
+	tmpfile, err := ioutil.TempFile("", "tmpResolvConf")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(tmpfile.Name())
+	if _, err := tmpfile.Write(resolvConfContent); err != nil {
+		t.Fatal(err)
+	}
+	if err := tmpfile.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	configurer := NewConfigurer(recorder, nodeRef, nil, []net.IP{net.ParseIP(testClusterNameserver)}, testClusterDNSDomain, tmpfile.Name())
 
 	testCases := []struct {
 		desc                    string
 		customPodDNSFeatureGate bool
+		hostnetwork             bool
+		dnsPolicy               v1.DNSPolicy
 		dnsConfig               *v1.PodDNSConfig
 		expectedDNSConfig       *runtimeapi.DNSConfig
 	}{
 		{
-			desc:              "feature gate is disabled, DNSNone should fallback to DNSClusterFirst",
-			expectedDNSConfig: clusterFirstDNSConfig,
+			desc:      "feature gate is disabled, DNSNone should fallback to DNSClusterFirst",
+			dnsPolicy: v1.DNSNone,
+			expectedDNSConfig: &runtimeapi.DNSConfig{
+				Servers:  []string{testClusterNameserver},
+				Searches: []string{testNsSvcDomain, testSvcDomain, testClusterDNSDomain, testHostDomain},
+				Options:  []string{"ndots:5"},
+			},
 		},
 		{
 			desc: "feature gate is enabled, DNSNone without DNSConfig should have empty DNS settings",
 			customPodDNSFeatureGate: true,
+			dnsPolicy:               v1.DNSNone,
 			expectedDNSConfig:       &runtimeapi.DNSConfig{},
 		},
 		{
 			desc: "feature gate is enabled, DNSNone with DNSConfig should have a merged DNS settings",
 			customPodDNSFeatureGate: true,
+			dnsPolicy:               v1.DNSNone,
 			dnsConfig: &v1.PodDNSConfig{
-				Nameservers: []string{"10.0.0.10"},
+				Nameservers: []string{"203.0.113.1"},
 				Searches:    []string{"my.domain", "second.domain"},
 				Options: []v1.PodDNSConfigOption{
-					{Name: "ndots", Value: &testOptionValue},
+					{Name: "ndots", Value: &testNdotsOptionValue},
 					{Name: "debug"},
 				},
 			},
 			expectedDNSConfig: &runtimeapi.DNSConfig{
-				Servers:  []string{"10.0.0.10"},
+				Servers:  []string{"203.0.113.1"},
 				Searches: []string{"my.domain", "second.domain"},
 				Options:  []string{"ndots:3", "debug"},
 			},
 		},
+		{
+			desc: "feature gate is enabled, DNSClusterFirst with DNSConfig should have a merged DNS settings",
+			customPodDNSFeatureGate: true,
+			dnsPolicy:               v1.DNSClusterFirst,
+			dnsConfig: &v1.PodDNSConfig{
+				Nameservers: []string{"10.0.0.11"},
+				Searches:    []string{"my.domain"},
+				Options: []v1.PodDNSConfigOption{
+					{Name: "ndots", Value: &testNdotsOptionValue},
+					{Name: "debug"},
+				},
+			},
+			expectedDNSConfig: &runtimeapi.DNSConfig{
+				Servers:  []string{testClusterNameserver, "10.0.0.11"},
+				Searches: []string{testNsSvcDomain, testSvcDomain, testClusterDNSDomain, testHostDomain, "my.domain"},
+				Options:  []string{"ndots:3", "debug"},
+			},
+		},
+		{
+			desc: "feature gate is enabled, DNSClusterFirstWithHostNet with DNSConfig should have a merged DNS settings",
+			customPodDNSFeatureGate: true,
+			hostnetwork:             true,
+			dnsPolicy:               v1.DNSClusterFirstWithHostNet,
+			dnsConfig: &v1.PodDNSConfig{
+				Nameservers: []string{"10.0.0.11"},
+				Searches:    []string{"my.domain"},
+				Options: []v1.PodDNSConfigOption{
+					{Name: "ndots", Value: &testNdotsOptionValue},
+					{Name: "debug"},
+				},
+			},
+			expectedDNSConfig: &runtimeapi.DNSConfig{
+				Servers:  []string{testClusterNameserver, "10.0.0.11"},
+				Searches: []string{testNsSvcDomain, testSvcDomain, testClusterDNSDomain, testHostDomain, "my.domain"},
+				Options:  []string{"ndots:3", "debug"},
+			},
+		},
+		{
+			desc: "feature gate is enabled, DNSDefault with DNSConfig should have a merged DNS settings",
+			customPodDNSFeatureGate: true,
+			dnsPolicy:               v1.DNSDefault,
+			dnsConfig: &v1.PodDNSConfig{
+				Nameservers: []string{"10.0.0.11"},
+				Searches:    []string{"my.domain"},
+				Options: []v1.PodDNSConfigOption{
+					{Name: "ndots", Value: &testNdotsOptionValue},
+					{Name: "debug"},
+				},
+			},
+			expectedDNSConfig: &runtimeapi.DNSConfig{
+				Servers:  []string{testHostNameserver, "10.0.0.11"},
+				Searches: []string{testHostDomain, "my.domain"},
+				Options:  []string{"ndots:3", "debug"},
+			},
+		},
 	}
 
 	for _, tc := range testCases {

@@ -552,14 +624,16 @@ func TestGetPodDNSCustom(t *testing.T) {
 			t.Errorf("Failed to set CustomPodDNS feature gate: %v", err)
 		}
 
-		pod.Spec.DNSConfig = tc.dnsConfig
+		testPod.Spec.HostNetwork = tc.hostnetwork
+		testPod.Spec.DNSConfig = tc.dnsConfig
+		testPod.Spec.DNSPolicy = tc.dnsPolicy
 
-		resDNSConfig, err := configurer.GetPodDNS(pod)
+		resDNSConfig, err := configurer.GetPodDNS(testPod)
 		if err != nil {
-			t.Errorf("%s: GetPodDNS(%v), unexpected error: %v", tc.desc, pod, err)
+			t.Errorf("%s: GetPodDNS(%v), unexpected error: %v", tc.desc, testPod, err)
 		}
 		if !dnsConfigsAreEqual(resDNSConfig, tc.expectedDNSConfig) {
-			t.Errorf("%s: GetPodDNS(%v)=%v, want %v", tc.desc, pod, resDNSConfig, tc.expectedDNSConfig)
+			t.Errorf("%s: GetPodDNS(%v)=%v, want %v", tc.desc, testPod, resDNSConfig, tc.expectedDNSConfig)
 		}
 	}
 }

vendor/k8s.io/kubernetes/pkg/kubelet/network/hairpin/BUILD (generated, vendored): 3 changed lines

@@ -19,8 +19,7 @@ go_library(
 go_test(
     name = "go_default_test",
     srcs = ["hairpin_test.go"],
-    importpath = "k8s.io/kubernetes/pkg/kubelet/network/hairpin",
-    library = ":go_default_library",
+    embed = [":go_default_library"],
     deps = [
         "//vendor/k8s.io/utils/exec:go_default_library",
         "//vendor/k8s.io/utils/exec/testing:go_default_library",

vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/BUILD (generated, vendored): 7 changed lines

@@ -17,11 +17,14 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/kubelet/network/hostport",
     deps = [
         "//pkg/proxy/iptables:go_default_library",
+        "//pkg/util/conntrack:go_default_library",
         "//pkg/util/iptables:go_default_library",
+        "//pkg/util/net:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//vendor/k8s.io/utils/exec:go_default_library",
     ],
 )
 
@@ -33,12 +36,12 @@ go_test(
         "hostport_syncer_test.go",
         "hostport_test.go",
     ],
-    importpath = "k8s.io/kubernetes/pkg/kubelet/network/hostport",
-    library = ":go_default_library",
+    embed = [":go_default_library"],
     deps = [
         "//pkg/util/iptables:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/utils/exec:go_default_library",
     ],
 )
 
vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_manager.go (generated, vendored): 53 changed lines

@@ -26,9 +26,13 @@ import (
 	"sync"
 
 	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables"
+	"k8s.io/kubernetes/pkg/util/conntrack"
 	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
+	utilnet "k8s.io/kubernetes/pkg/util/net"
+	"k8s.io/utils/exec"
 )
 
 // HostPortManager is an interface for adding and removing hostport for a given pod sandbox.

@@ -44,18 +48,26 @@ type HostPortManager interface {
 }
 
 type hostportManager struct {
-	hostPortMap map[hostport]closeable
-	iptables    utiliptables.Interface
-	portOpener  hostportOpener
-	mu          sync.Mutex
+	hostPortMap    map[hostport]closeable
+	execer         exec.Interface
+	conntrackFound bool
+	iptables       utiliptables.Interface
+	portOpener     hostportOpener
+	mu             sync.Mutex
 }
 
 func NewHostportManager(iptables utiliptables.Interface) HostPortManager {
-	return &hostportManager{
+	h := &hostportManager{
 		hostPortMap: make(map[hostport]closeable),
+		execer:      exec.New(),
 		iptables:    iptables,
 		portOpener:  openLocalPort,
 	}
+	h.conntrackFound = conntrack.Exists(h.execer)
+	if !h.conntrackFound {
+		glog.Warningf("The binary conntrack is not installed, this can cause failures in network connection cleanup.")
+	}
+	return h
 }
 
 func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInterfaceName string) (err error) {

@@ -103,10 +115,14 @@ func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInt
 	}
 
 	newChains := []utiliptables.Chain{}
+	conntrackPortsToRemove := []int{}
 	for _, pm := range hostportMappings {
 		protocol := strings.ToLower(string(pm.Protocol))
 		chain := getHostportChain(id, pm)
 		newChains = append(newChains, chain)
+		if pm.Protocol == v1.ProtocolUDP {
+			conntrackPortsToRemove = append(conntrackPortsToRemove, int(pm.HostPort))
+		}
 
 		// Add new hostport chain
 		writeLine(natChains, utiliptables.MakeChainLine(chain))

@@ -150,6 +166,21 @@ func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInt
 		// clean up opened host port if encounter any error
 		return utilerrors.NewAggregate([]error{err, hm.closeHostports(hostportMappings)})
 	}
+	isIpv6 := utilnet.IsIPv6(podPortMapping.IP)
+
+	// Remove conntrack entries just after adding the new iptables rules. If the conntrack entry is removed along with
+	// the IP tables rule, it can be the case that the packets received by the node after iptables rule removal will
+	// create a new conntrack entry without any DNAT. That will result in blackhole of the traffic even after correct
+	// iptables rules have been added back.
+	if hm.execer != nil && hm.conntrackFound {
+		glog.Infof("Starting to delete udp conntrack entries: %v, isIPv6 - %v", conntrackPortsToRemove, isIpv6)
+		for _, port := range conntrackPortsToRemove {
+			err = conntrack.ClearEntriesForPort(hm.execer, port, isIpv6, v1.ProtocolUDP)
+			if err != nil {
+				glog.Errorf("Failed to clear udp conntrack for port %d, error: %v", port, err)
+			}
+		}
+	}
 	return nil
 }
 
@@ -178,8 +209,6 @@ func (hm *hostportManager) Remove(id string, podPortMapping *PodPortMapping) (er
 	chainsToRemove := []utiliptables.Chain{}
 	for _, pm := range hostportMappings {
 		chainsToRemove = append(chainsToRemove, getHostportChain(id, pm))
-		// TODO remove this after release 1.9, please refer https://github.com/kubernetes/kubernetes/pull/55153
-		chainsToRemove = append(chainsToRemove, getBuggyHostportChain(id, pm))
 	}
 
 	// remove rules that consists of target chains

@@ -255,16 +284,6 @@ func getHostportChain(id string, pm *PortMapping) utiliptables.Chain {
 	return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16])
 }
 
-// This bugy func does bad conversion on HostPort from int32 to string.
-// It may generates same chain names for different ports of the same pod, e.g. port 57119/55429/56833.
-// `getHostportChain` fixed this bug. In order to cleanup the legacy chains/rules, it is temporarily left.
-// TODO remove this after release 1.9, please refer https://github.com/kubernetes/kubernetes/pull/55153
-func getBuggyHostportChain(id string, pm *PortMapping) utiliptables.Chain {
-	hash := sha256.Sum256([]byte(id + string(pm.HostPort) + string(pm.Protocol)))
-	encoded := base32.StdEncoding.EncodeToString(hash[:])
-	return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16])
-}
-
 // gatherHostportMappings returns all the PortMappings which has hostport for a pod
 func gatherHostportMappings(podPortMapping *PodPortMapping) []*PortMapping {
 	mappings := []*PortMapping{}

vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_manager_test.go (generated, vendored): 86 changed lines

@@ -25,16 +25,9 @@ import (
 	"github.com/stretchr/testify/assert"
 	"k8s.io/api/core/v1"
 	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
+	"k8s.io/utils/exec"
 )
 
-func NewFakeHostportManager() HostPortManager {
-	return &hostportManager{
-		hostPortMap: make(map[hostport]closeable),
-		iptables:    NewFakeIPTables(),
-		portOpener:  NewFakeSocketManager().openFakeSocket,
-	}
-}
-
 func TestHostportManager(t *testing.T) {
 	iptables := NewFakeIPTables()
 	portOpener := NewFakeSocketManager()

@@ -42,6 +35,7 @@ func TestHostportManager(t *testing.T) {
 		hostPortMap: make(map[hostport]closeable),
 		iptables:    iptables,
 		portOpener:  portOpener.openFakeSocket,
+		execer:      exec.New(),
 	}
 
 	testCases := []struct {

@@ -211,79 +205,3 @@ func TestGetHostportChain(t *testing.T) {
 		t.Fatal(m)
 	}
 }
-
-func TestHostPortManagerRemoveLegacyRules(t *testing.T) {
-	iptables := NewFakeIPTables()
-	legacyRules := [][]string{
-		{"-A", "KUBE-HOSTPORTS", "-m comment --comment \"pod3_ns1 hostport 8443\" -m tcp -p tcp --dport 8443 -j KUBE-HP-5N7UH5JAXCVP5UJR"},
-		{"-A", "KUBE-HOSTPORTS", "-m comment --comment \"pod1_ns1 hostport 8081\" -m udp -p udp --dport 8081 -j KUBE-HP-7THKRFSEH4GIIXK7"},
-		{"-A", "KUBE-HOSTPORTS", "-m comment --comment \"pod1_ns1 hostport 8080\" -m tcp -p tcp --dport 8080 -j KUBE-HP-4YVONL46AKYWSKS3"},
-		{"-A", "OUTPUT", "-m comment --comment \"kube hostport portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS"},
-		{"-A", "PREROUTING", "-m comment --comment \"kube hostport portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS"},
-		{"-A", "POSTROUTING", "-m comment --comment \"SNAT for localhost access to hostports\" -o cbr0 -s 127.0.0.0/8 -j MASQUERADE"},
-		{"-A", "KUBE-HP-4YVONL46AKYWSKS3", "-m comment --comment \"pod1_ns1 hostport 8080\" -s 10.1.1.2/32 -j KUBE-MARK-MASQ"},
-		{"-A", "KUBE-HP-4YVONL46AKYWSKS3", "-m comment --comment \"pod1_ns1 hostport 8080\" -m tcp -p tcp -j DNAT --to-destination 10.1.1.2:80"},
-		{"-A", "KUBE-HP-7THKRFSEH4GIIXK7", "-m comment --comment \"pod1_ns1 hostport 8081\" -s 10.1.1.2/32 -j KUBE-MARK-MASQ"},
-		{"-A", "KUBE-HP-7THKRFSEH4GIIXK7", "-m comment --comment \"pod1_ns1 hostport 8081\" -m udp -p udp -j DNAT --to-destination 10.1.1.2:81"},
-		{"-A", "KUBE-HP-5N7UH5JAXCVP5UJR", "-m comment --comment \"pod3_ns1 hostport 8443\" -s 10.1.1.4/32 -j KUBE-MARK-MASQ"},
-		{"-A", "KUBE-HP-5N7UH5JAXCVP5UJR", "-m comment --comment \"pod3_ns1 hostport 8443\" -m tcp -p tcp -j DNAT --to-destination 10.1.1.4:443"},
-	}
-	for _, rule := range legacyRules {
-		_, err := iptables.EnsureChain(utiliptables.TableNAT, utiliptables.Chain(rule[1]))
-		assert.NoError(t, err)
-		_, err = iptables.ensureRule(utiliptables.RulePosition(rule[0]), utiliptables.TableNAT, utiliptables.Chain(rule[1]), rule[2])
-		assert.NoError(t, err)
-	}
-	portOpener := NewFakeSocketManager()
-	manager := &hostportManager{
-		hostPortMap: make(map[hostport]closeable),
-		iptables:    iptables,
-		portOpener:  portOpener.openFakeSocket,
-	}
-	err := manager.Remove("id", &PodPortMapping{
-		Name:        "pod1",
-		Namespace:   "ns1",
-		IP:          net.ParseIP("10.1.1.2"),
-		HostNetwork: false,
-		PortMappings: []*PortMapping{
-			{
-				HostPort:      8080,
-				ContainerPort: 80,
-				Protocol:      v1.ProtocolTCP,
-			},
-			{
-				HostPort:      8081,
-				ContainerPort: 81,
-				Protocol:      v1.ProtocolUDP,
-			},
-		},
-	})
-	assert.NoError(t, err)
-
-	err = manager.Remove("id", &PodPortMapping{
-		Name:        "pod3",
-		Namespace:   "ns1",
-		IP:          net.ParseIP("10.1.1.4"),
-		HostNetwork: false,
-		PortMappings: []*PortMapping{
-			{
-				HostPort:      8443,
-				ContainerPort: 443,
-				Protocol:      v1.ProtocolTCP,
-			},
-		},
-	})
-	assert.NoError(t, err)
-
-	natTable, ok := iptables.tables[string(utiliptables.TableNAT)]
-	assert.True(t, ok)
-	// check KUBE-HOSTPORTS chain should be cleaned up
-	hostportChain, ok := natTable.chains["KUBE-HOSTPORTS"]
-	assert.True(t, ok, string(hostportChain.name))
-	assert.Equal(t, 0, len(hostportChain.rules), "%v", hostportChain.rules)
-	// check KUBE-HP-* chains should be deleted
-	for _, name := range []string{"KUBE-HP-4YVONL46AKYWSKS3", "KUBE-HP-7THKRFSEH4GIIXK7", "KUBE-HP-5N7UH5JAXCVP5UJR"} {
-		_, ok := natTable.chains[name]
-		assert.False(t, ok)
-	}
-}

vendor/k8s.io/kubernetes/pkg/kubelet/network/kubenet/BUILD (generated, vendored): 101 changed lines

@@ -10,20 +10,68 @@ go_library(
     name = "go_default_library",
     srcs = [
        "kubenet.go",
-        "kubenet_unsupported.go",
     ] + select({
-        "@io_bazel_rules_go//go/platform:linux_amd64": [
+        "@io_bazel_rules_go//go/platform:android": [
+            "kubenet_unsupported.go",
+        ],
+        "@io_bazel_rules_go//go/platform:darwin": [
+            "kubenet_unsupported.go",
+        ],
+        "@io_bazel_rules_go//go/platform:dragonfly": [
+            "kubenet_unsupported.go",
+        ],
+        "@io_bazel_rules_go//go/platform:freebsd": [
+            "kubenet_unsupported.go",
+        ],
+        "@io_bazel_rules_go//go/platform:linux": [
             "kubenet_linux.go",
         ],
+        "@io_bazel_rules_go//go/platform:nacl": [
+            "kubenet_unsupported.go",
+        ],
+        "@io_bazel_rules_go//go/platform:netbsd": [
+            "kubenet_unsupported.go",
+        ],
+        "@io_bazel_rules_go//go/platform:openbsd": [
+            "kubenet_unsupported.go",
+        ],
+        "@io_bazel_rules_go//go/platform:plan9": [
+            "kubenet_unsupported.go",
+        ],
+        "@io_bazel_rules_go//go/platform:solaris": [
+            "kubenet_unsupported.go",
+        ],
+        "@io_bazel_rules_go//go/platform:windows": [
+            "kubenet_unsupported.go",
+        ],
        "//conditions:default": [],
     }),
     importpath = "k8s.io/kubernetes/pkg/kubelet/network/kubenet",
-    deps = [
-        "//pkg/kubelet/apis/kubeletconfig:go_default_library",
-        "//pkg/kubelet/container:go_default_library",
-        "//pkg/kubelet/network:go_default_library",
-    ] + select({
-        "@io_bazel_rules_go//go/platform:linux_amd64": [
+    deps = select({
+        "@io_bazel_rules_go//go/platform:android": [
+            "//pkg/kubelet/apis/kubeletconfig:go_default_library",
+            "//pkg/kubelet/container:go_default_library",
+            "//pkg/kubelet/network:go_default_library",
+        ],
+        "@io_bazel_rules_go//go/platform:darwin": [
+            "//pkg/kubelet/apis/kubeletconfig:go_default_library",
+            "//pkg/kubelet/container:go_default_library",
+            "//pkg/kubelet/network:go_default_library",
+        ],
+        "@io_bazel_rules_go//go/platform:dragonfly": [
+            "//pkg/kubelet/apis/kubeletconfig:go_default_library",
+            "//pkg/kubelet/container:go_default_library",
+            "//pkg/kubelet/network:go_default_library",
+        ],
+        "@io_bazel_rules_go//go/platform:freebsd": [
+            "//pkg/kubelet/apis/kubeletconfig:go_default_library",
+            "//pkg/kubelet/container:go_default_library",
+            "//pkg/kubelet/network:go_default_library",
+        ],
+        "@io_bazel_rules_go//go/platform:linux": [
+            "//pkg/kubelet/apis/kubeletconfig:go_default_library",
+            "//pkg/kubelet/container:go_default_library",
+            "//pkg/kubelet/network:go_default_library",
             "//pkg/kubelet/network/hostport:go_default_library",
             "//pkg/util/bandwidth:go_default_library",
             "//pkg/util/dbus:go_default_library",

@@ -42,6 +90,36 @@ go_library(
             "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
             "//vendor/k8s.io/utils/exec:go_default_library",
         ],
+        "@io_bazel_rules_go//go/platform:nacl": [
+            "//pkg/kubelet/apis/kubeletconfig:go_default_library",
+            "//pkg/kubelet/container:go_default_library",
+            "//pkg/kubelet/network:go_default_library",
+        ],
+        "@io_bazel_rules_go//go/platform:netbsd": [
+            "//pkg/kubelet/apis/kubeletconfig:go_default_library",
+            "//pkg/kubelet/container:go_default_library",
+            "//pkg/kubelet/network:go_default_library",
+        ],
+        "@io_bazel_rules_go//go/platform:openbsd": [
+            "//pkg/kubelet/apis/kubeletconfig:go_default_library",
+            "//pkg/kubelet/container:go_default_library",
+            "//pkg/kubelet/network:go_default_library",
+        ],
+        "@io_bazel_rules_go//go/platform:plan9": [
+            "//pkg/kubelet/apis/kubeletconfig:go_default_library",
+            "//pkg/kubelet/container:go_default_library",
+            "//pkg/kubelet/network:go_default_library",
+        ],
+        "@io_bazel_rules_go//go/platform:solaris": [
+            "//pkg/kubelet/apis/kubeletconfig:go_default_library",
+            "//pkg/kubelet/container:go_default_library",
+            "//pkg/kubelet/network:go_default_library",
+        ],
+        "@io_bazel_rules_go//go/platform:windows": [
+            "//pkg/kubelet/apis/kubeletconfig:go_default_library",
+            "//pkg/kubelet/container:go_default_library",
+            "//pkg/kubelet/network:go_default_library",
+        ],
         "//conditions:default": [],
     }),
 )

@@ -49,15 +127,14 @@ go_library(
 go_test(
     name = "go_default_test",
     srcs = select({
-        "@io_bazel_rules_go//go/platform:linux_amd64": [
+        "@io_bazel_rules_go//go/platform:linux": [
             "kubenet_linux_test.go",
         ],
         "//conditions:default": [],
     }),
-    importpath = "k8s.io/kubernetes/pkg/kubelet/network/kubenet",
-    library = ":go_default_library",
+    embed = [":go_default_library"],
     deps = select({
-        "@io_bazel_rules_go//go/platform:linux_amd64": [
+        "@io_bazel_rules_go//go/platform:linux": [
             "//pkg/kubelet/apis/kubeletconfig:go_default_library",
             "//pkg/kubelet/container:go_default_library",
             "//pkg/kubelet/network:go_default_library",

vendor/k8s.io/kubernetes/pkg/kubelet/network/kubenet/kubenet_linux.go (generated, vendored): 23 changed lines

@@ -334,20 +334,18 @@ func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kube
 
 	// Put the container bridge into promiscuous mode to force it to accept hairpin packets.
 	// TODO: Remove this once the kernel bug (#20096) is fixed.
-	// TODO: check and set promiscuous mode with netlink once vishvananda/netlink supports it
 	if plugin.hairpinMode == kubeletconfig.PromiscuousBridge {
-		output, err := plugin.execer.Command("ip", "link", "show", "dev", BridgeName).CombinedOutput()
-		if err != nil || strings.Index(string(output), "PROMISC") < 0 {
-			_, err := plugin.execer.Command("ip", "link", "set", BridgeName, "promisc", "on").CombinedOutput()
-			if err != nil {
-				return fmt.Errorf("Error setting promiscuous mode on %s: %v", BridgeName, err)
-			}
-		}
-
 		link, err := netlink.LinkByName(BridgeName)
 		if err != nil {
 			return fmt.Errorf("failed to lookup %q: %v", BridgeName, err)
 		}
+		if link.Attrs().Promisc != 1 {
+			// promiscuous mode is not on, then turn it on.
+			err := netlink.SetPromiscOn(link)
+			if err != nil {
+				return fmt.Errorf("Error setting promiscuous mode on %s: %v", BridgeName, err)
+			}
+		}
 
 		// configure the ebtables rules to eliminate duplicate packets by best effort
 		plugin.syncEbtablesDedupRules(link.Attrs().HardwareAddr)

@@ -572,7 +570,7 @@ func (plugin *kubenetNetworkPlugin) Status() error {
 	return nil
 }
 
-// checkCNIPlugin returns if all kubenet required cni plugins can be found at /opt/cni/bin or user specifed NetworkPluginDir.
+// checkCNIPlugin returns if all kubenet required cni plugins can be found at /opt/cni/bin or user specified NetworkPluginDir.
 func (plugin *kubenetNetworkPlugin) checkCNIPlugin() bool {
 	if plugin.checkCNIPluginInDir(DefaultCNIDir) || plugin.checkCNIPluginInDir(plugin.vendorDir) {
 		return true

@@ -767,7 +765,10 @@ func (plugin *kubenetNetworkPlugin) delContainerFromNetwork(config *libcni.Netwo
 	}
 
 	glog.V(3).Infof("Removing %s/%s from '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt)
-	if err := plugin.cniConfig.DelNetwork(config, rt); err != nil {
+	err = plugin.cniConfig.DelNetwork(config, rt)
+	// The pod may not get deleted successfully at the first time.
+	// Ignore "no such file or directory" error in case the network has already been deleted in previous attempts.
+	if err != nil && !strings.Contains(err.Error(), "no such file or directory") {
 		return fmt.Errorf("Error removing container from network: %v", err)
 	}
 	return nil

vendor/k8s.io/kubernetes/pkg/kubelet/network/testing/BUILD (generated, vendored): 3 changed lines

@@ -29,8 +29,7 @@ go_library(
 go_test(
     name = "go_default_test",
     srcs = ["plugins_test.go"],
-    importpath = "k8s.io/kubernetes/pkg/kubelet/network/testing",
-    library = ":go_default_library",
+    embed = [":go_default_library"],
     deps = [
         "//pkg/kubelet/apis/kubeletconfig:go_default_library",
         "//pkg/kubelet/container:go_default_library",