vendor updates

Serguei Bezverkhi
2018-03-06 17:33:18 -05:00
parent 4b3ebc171b
commit e9033989a0
5854 changed files with 248382 additions and 119809 deletions


@@ -0,0 +1,89 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = [
"controller_test.go",
"range_allocator_test.go",
"timeout_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/controller:go_default_library",
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
"//pkg/controller/nodeipam/ipam/test:go_default_library",
"//pkg/controller/testutil:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"adapter.go",
"cidr_allocator.go",
"cloud_cidr_allocator.go",
"controller.go",
"doc.go",
"range_allocator.go",
"timeout.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/nodeipam/ipam",
deps = [
"//pkg/api/v1/node:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
"//pkg/controller/nodeipam/ipam/sync:go_default_library",
"//pkg/controller/util/node:go_default_library",
"//pkg/util/node:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/controller/nodeipam/ipam/cidrset:all-srcs",
"//pkg/controller/nodeipam/ipam/sync:all-srcs",
"//pkg/controller/nodeipam/ipam/test:all-srcs",
],
tags = ["automanaged"],
)


@@ -0,0 +1,7 @@
approvers:
- bowei
- dnardo
reviewers:
- bowei
- dnardo
- freehan


@@ -0,0 +1,125 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipam
import (
"context"
"encoding/json"
"net"
"github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme"
)
type adapter struct {
k8s clientset.Interface
cloud *gce.GCECloud
recorder record.EventRecorder
}
func newAdapter(k8s clientset.Interface, cloud *gce.GCECloud) *adapter {
ret := &adapter{
k8s: k8s,
cloud: cloud,
}
broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(glog.Infof)
ret.recorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloudCIDRAllocator"})
glog.V(0).Infof("Sending events to api server.")
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{
Interface: v1core.New(k8s.CoreV1().RESTClient()).Events(""),
})
return ret
}
func (a *adapter) Alias(ctx context.Context, nodeName string) (*net.IPNet, error) {
cidrs, err := a.cloud.AliasRanges(types.NodeName(nodeName))
if err != nil {
return nil, err
}
switch len(cidrs) {
case 0:
return nil, nil
case 1:
break
default:
glog.Warningf("Node %q has more than one alias assigned (%v), defaulting to the first", nodeName, cidrs)
}
_, cidrRange, err := net.ParseCIDR(cidrs[0])
if err != nil {
return nil, err
}
return cidrRange, nil
}
func (a *adapter) AddAlias(ctx context.Context, nodeName string, cidrRange *net.IPNet) error {
return a.cloud.AddAliasToInstance(types.NodeName(nodeName), cidrRange)
}
func (a *adapter) Node(ctx context.Context, name string) (*v1.Node, error) {
return a.k8s.CoreV1().Nodes().Get(name, metav1.GetOptions{})
}
func (a *adapter) UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRange *net.IPNet) error {
patch := map[string]interface{}{
"apiVersion": node.APIVersion,
"kind": node.Kind,
"metadata": map[string]interface{}{"name": node.Name},
"spec": map[string]interface{}{"podCIDR": cidrRange.String()},
}
bytes, err := json.Marshal(patch)
if err != nil {
return err
}
_, err = a.k8s.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, bytes)
return err
}
func (a *adapter) UpdateNodeNetworkUnavailable(nodeName string, unavailable bool) error {
condition := v1.ConditionFalse
if unavailable {
condition = v1.ConditionTrue
}
return nodeutil.SetNodeCondition(a.k8s, types.NodeName(nodeName), v1.NodeCondition{
Type: v1.NodeNetworkUnavailable,
Status: condition,
Reason: "RouteCreated",
Message: "NodeController created an implicit route",
LastTransitionTime: metav1.Now(),
})
}
func (a *adapter) EmitNodeWarningEvent(nodeName, reason, fmt string, args ...interface{}) {
ref := &v1.ObjectReference{Kind: "Node", Name: nodeName}
a.recorder.Eventf(ref, v1.EventTypeNormal, reason, fmt, args...)
}
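
For reference, a minimal sketch of the patch body produced by UpdateNodePodCIDR above, assuming a hypothetical node "node-1" whose object carries apiVersion "v1" and kind "Node", with an illustrative range of 10.20.0.0/24:

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    // Mirror of the patch map built in UpdateNodePodCIDR; all values are illustrative.
    patch := map[string]interface{}{
        "apiVersion": "v1",
        "kind":       "Node",
        "metadata":   map[string]interface{}{"name": "node-1"},
        "spec":       map[string]interface{}{"podCIDR": "10.20.0.0/24"},
    }
    b, _ := json.Marshal(patch)
    // Prints:
    // {"apiVersion":"v1","kind":"Node","metadata":{"name":"node-1"},"spec":{"podCIDR":"10.20.0.0/24"}}
    fmt.Println(string(b))
}

The marshaled document is then sent as a types.StrategicMergePatchType patch via the CoreV1 Nodes client, as in the function above.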


@@ -0,0 +1,124 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipam
import (
"fmt"
"net"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
informers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider"
)
type nodeAndCIDR struct {
cidr *net.IPNet
nodeName string
}
// CIDRAllocatorType is the type of the allocator to use.
type CIDRAllocatorType string
const (
// RangeAllocatorType is the allocator that uses an internal CIDR
// range allocator to do node CIDR range allocations.
RangeAllocatorType CIDRAllocatorType = "RangeAllocator"
// CloudAllocatorType is the allocator that uses cloud platform
// support to do node CIDR range allocations.
CloudAllocatorType CIDRAllocatorType = "CloudAllocator"
// IPAMFromClusterAllocatorType uses the ipam controller to sync the node
// CIDR range allocations from the cluster to the cloud.
IPAMFromClusterAllocatorType = "IPAMFromCluster"
// IPAMFromCloudAllocatorType uses the ipam controller to sync the node
// CIDR range allocations from the cloud to the cluster.
IPAMFromCloudAllocatorType = "IPAMFromCloud"
)
// TODO: figure out good settings for these constants.
const (
// The amount of time the nodecontroller polls on the list nodes endpoint.
apiserverStartupGracePeriod = 10 * time.Minute
// The no. of NodeSpec updates NC can process concurrently.
cidrUpdateWorkers = 30
// The max no. of NodeSpec updates that can be enqueued.
cidrUpdateQueueSize = 5000
// cidrUpdateRetries is the no. of times a NodeSpec update will be retried before dropping it.
cidrUpdateRetries = 3
)
// CIDRAllocator is an interface implemented by things that know how
// to allocate/occupy/recycle CIDR for nodes.
type CIDRAllocator interface {
// AllocateOrOccupyCIDR looks at the given node, assigns it a valid
// CIDR if it doesn't currently have one, or marks the CIDR as used if
// the node already has one.
AllocateOrOccupyCIDR(node *v1.Node) error
// ReleaseCIDR releases the CIDR of the removed node.
ReleaseCIDR(node *v1.Node) error
// Run starts all the working logic of the allocator.
Run(stopCh <-chan struct{})
}
// New creates a new CIDR range allocator.
func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, allocatorType CIDRAllocatorType, clusterCIDR, serviceCIDR *net.IPNet, nodeCIDRMaskSize int) (CIDRAllocator, error) {
nodeList, err := listNodes(kubeClient)
if err != nil {
return nil, err
}
switch allocatorType {
case RangeAllocatorType:
return NewCIDRRangeAllocator(kubeClient, nodeInformer, clusterCIDR, serviceCIDR, nodeCIDRMaskSize, nodeList)
case CloudAllocatorType:
return NewCloudCIDRAllocator(kubeClient, cloud, nodeInformer)
default:
return nil, fmt.Errorf("Invalid CIDR allocator type: %v", allocatorType)
}
}
func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) {
var nodeList *v1.NodeList
// We must poll because apiserver might not be up. This error causes
// controller manager to restart.
if pollErr := wait.Poll(10*time.Second, apiserverStartupGracePeriod, func() (bool, error) {
var err error
nodeList, err = kubeClient.CoreV1().Nodes().List(metav1.ListOptions{
FieldSelector: fields.Everything().String(),
LabelSelector: labels.Everything().String(),
})
if err != nil {
glog.Errorf("Failed to list all nodes: %v", err)
return false, nil
}
return true, nil
}); pollErr != nil {
return nil, fmt.Errorf("Failed to list all nodes in %v, cannot proceed without updating CIDR map",
apiserverStartupGracePeriod)
}
return nodeList, nil
}
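
A minimal sketch of how New above might be wired up; the client, cloud provider, and node informer are assumed to be built by the caller, and the CIDR literals and mask size are illustrative:

package example

import (
    "net"

    informers "k8s.io/client-go/informers/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/pkg/cloudprovider"
    "k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
)

// startRangeAllocator is a hypothetical helper: it dispatches through ipam.New with
// RangeAllocatorType and runs the resulting allocator until stopCh is closed.
func startRangeAllocator(kubeClient clientset.Interface, cloud cloudprovider.Interface,
    nodeInformer informers.NodeInformer, stopCh <-chan struct{}) error {
    _, clusterCIDR, _ := net.ParseCIDR("10.244.0.0/16") // example cluster range
    _, serviceCIDR, _ := net.ParseCIDR("10.96.0.0/12")  // example service range

    alloc, err := ipam.New(kubeClient, cloud, nodeInformer,
        ipam.RangeAllocatorType, clusterCIDR, serviceCIDR, 24)
    if err != nil {
        return err
    }
    go alloc.Run(stopCh)
    return nil
}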


@@ -0,0 +1,33 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = ["cidr_set_test.go"],
embed = [":go_default_library"],
deps = ["//vendor/github.com/golang/glog:go_default_library"],
)
go_library(
name = "go_default_library",
srcs = ["cidr_set.go"],
importpath = "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset",
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@@ -0,0 +1,264 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cidrset
import (
"encoding/binary"
"errors"
"fmt"
"math/big"
"math/bits"
"net"
"sync"
)
// CidrSet manages a set of CIDR ranges from which blocks of IPs can
// be allocated.
type CidrSet struct {
sync.Mutex
clusterCIDR *net.IPNet
clusterIP net.IP
clusterMaskSize int
maxCIDRs int
nextCandidate int
used big.Int
subNetMaskSize int
}
const (
// The subnet mask size cannot be greater than 16 more than the cluster mask size
// TODO: https://github.com/kubernetes/kubernetes/issues/44918
// clusterSubnetMaxDiff limited to 16 due to the uncompressed bitmap
clusterSubnetMaxDiff = 16
// halfIPv6Len is half the length of an IPv6 address, in bytes
halfIPv6Len = net.IPv6len / 2
)
var (
// ErrCIDRRangeNoCIDRsRemaining occurs when there is no more space
// to allocate CIDR ranges.
ErrCIDRRangeNoCIDRsRemaining = errors.New(
"CIDR allocation failed; there are no remaining CIDRs left to allocate in the accepted range")
// ErrCIDRSetSubNetTooBig occurs when the subnet mask size is too
// big compared to the CIDR mask size.
ErrCIDRSetSubNetTooBig = errors.New(
"New CIDR set failed; the node CIDR size is too big")
)
// NewCIDRSet creates a new CidrSet.
func NewCIDRSet(clusterCIDR *net.IPNet, subNetMaskSize int) (*CidrSet, error) {
clusterMask := clusterCIDR.Mask
clusterMaskSize, _ := clusterMask.Size()
var maxCIDRs int
if (clusterCIDR.IP.To4() == nil) && (subNetMaskSize-clusterMaskSize > clusterSubnetMaxDiff) {
return nil, ErrCIDRSetSubNetTooBig
}
maxCIDRs = 1 << uint32(subNetMaskSize-clusterMaskSize)
return &CidrSet{
clusterCIDR: clusterCIDR,
clusterIP: clusterCIDR.IP,
clusterMaskSize: clusterMaskSize,
maxCIDRs: maxCIDRs,
subNetMaskSize: subNetMaskSize,
}, nil
}
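// indexToCIDRBlock (below) maps a bit index in the "used" bitmap back to the CIDR it
// represents. Worked IPv4 example (values illustrative): with clusterCIDR 10.0.0.0/8 and
// subNetMaskSize 16, index 5 is shifted left by 32-16 bits to 0x00050000 and ORed into
// 10.0.0.0 (0x0A000000), yielding 10.5.0.0/16.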
func (s *CidrSet) indexToCIDRBlock(index int) *net.IPNet {
var ip []byte
var mask int
switch /*v4 or v6*/ {
case s.clusterIP.To4() != nil:
{
j := uint32(index) << uint32(32-s.subNetMaskSize)
ipInt := (binary.BigEndian.Uint32(s.clusterIP)) | j
ip = make([]byte, 4)
binary.BigEndian.PutUint32(ip, ipInt)
mask = 32
}
case s.clusterIP.To16() != nil:
{
// leftClusterIP | rightClusterIP
// 2001:0DB8:1234:0000:0000:0000:0000:0000
const v6NBits = 128
const halfV6NBits = v6NBits / 2
leftClusterIP := binary.BigEndian.Uint64(s.clusterIP[:halfIPv6Len])
rightClusterIP := binary.BigEndian.Uint64(s.clusterIP[halfIPv6Len:])
leftIP, rightIP := make([]byte, halfIPv6Len), make([]byte, halfIPv6Len)
if s.subNetMaskSize <= halfV6NBits {
// We only care about left side IP
leftClusterIP |= uint64(index) << uint(halfV6NBits-s.subNetMaskSize)
} else {
if s.clusterMaskSize < halfV6NBits {
// see how many bits are needed to reach the left side
btl := uint(s.subNetMaskSize - halfV6NBits)
indexMaxBit := uint(64 - bits.LeadingZeros64(uint64(index)))
if indexMaxBit > btl {
leftClusterIP |= uint64(index) >> btl
}
}
// the right side is calculated the same way whether or not the
// subNetMaskSize also affects the left side
rightClusterIP |= uint64(index) << uint(v6NBits-s.subNetMaskSize)
}
binary.BigEndian.PutUint64(leftIP, leftClusterIP)
binary.BigEndian.PutUint64(rightIP, rightClusterIP)
ip = append(leftIP, rightIP...)
mask = 128
}
}
return &net.IPNet{
IP: ip,
Mask: net.CIDRMask(s.subNetMaskSize, mask),
}
}
// AllocateNext allocates the next free CIDR range. This will set the range
// as occupied and return the allocated range.
func (s *CidrSet) AllocateNext() (*net.IPNet, error) {
s.Lock()
defer s.Unlock()
nextUnused := -1
for i := 0; i < s.maxCIDRs; i++ {
candidate := (i + s.nextCandidate) % s.maxCIDRs
if s.used.Bit(candidate) == 0 {
nextUnused = candidate
break
}
}
if nextUnused == -1 {
return nil, ErrCIDRRangeNoCIDRsRemaining
}
s.nextCandidate = (nextUnused + 1) % s.maxCIDRs
s.used.SetBit(&s.used, nextUnused, 1)
return s.indexToCIDRBlock(nextUnused), nil
}
func (s *CidrSet) getBeginingAndEndIndices(cidr *net.IPNet) (begin, end int, err error) {
// cidr must be checked for nil before its mask is read.
if cidr == nil {
return -1, -1, fmt.Errorf("error getting indices for cluster cidr %v, cidr is nil", s.clusterCIDR)
}
begin, end = 0, s.maxCIDRs-1
cidrMask := cidr.Mask
maskSize, _ := cidrMask.Size()
var ipSize int
if !s.clusterCIDR.Contains(cidr.IP.Mask(s.clusterCIDR.Mask)) && !cidr.Contains(s.clusterCIDR.IP.Mask(cidr.Mask)) {
return -1, -1, fmt.Errorf("cidr %v is out of the range of cluster cidr %v", cidr, s.clusterCIDR)
}
if s.clusterMaskSize < maskSize {
ipSize = net.IPv4len
if cidr.IP.To4() == nil {
ipSize = net.IPv6len
}
subNetMask := net.CIDRMask(s.subNetMaskSize, ipSize*8)
begin, err = s.getIndexForCIDR(&net.IPNet{
IP: cidr.IP.Mask(subNetMask),
Mask: subNetMask,
})
if err != nil {
return -1, -1, err
}
ip := make([]byte, ipSize)
if cidr.IP.To4() != nil {
ipInt := binary.BigEndian.Uint32(cidr.IP) | (^binary.BigEndian.Uint32(cidr.Mask))
binary.BigEndian.PutUint32(ip, ipInt)
} else {
// ipIntLeft | ipIntRight
// 2001:0DB8:1234:0000:0000:0000:0000:0000
ipIntLeft := binary.BigEndian.Uint64(cidr.IP[:net.IPv6len/2]) | (^binary.BigEndian.Uint64(cidr.Mask[:net.IPv6len/2]))
ipIntRight := binary.BigEndian.Uint64(cidr.IP[net.IPv6len/2:]) | (^binary.BigEndian.Uint64(cidr.Mask[net.IPv6len/2:]))
binary.BigEndian.PutUint64(ip[:net.IPv6len/2], ipIntLeft)
binary.BigEndian.PutUint64(ip[net.IPv6len/2:], ipIntRight)
}
end, err = s.getIndexForCIDR(&net.IPNet{
IP: net.IP(ip).Mask(subNetMask),
Mask: subNetMask,
})
if err != nil {
return -1, -1, err
}
}
return begin, end, nil
}
// Release releases the given CIDR range.
func (s *CidrSet) Release(cidr *net.IPNet) error {
begin, end, err := s.getBeginingAndEndIndices(cidr)
if err != nil {
return err
}
s.Lock()
defer s.Unlock()
for i := begin; i <= end; i++ {
s.used.SetBit(&s.used, i, 0)
}
return nil
}
// Occupy marks the given CIDR range as used. Occupy does not check if the CIDR
// range was previously used.
func (s *CidrSet) Occupy(cidr *net.IPNet) (err error) {
begin, end, err := s.getBeginingAndEndIndices(cidr)
if err != nil {
return err
}
s.Lock()
defer s.Unlock()
for i := begin; i <= end; i++ {
s.used.SetBit(&s.used, i, 1)
}
return nil
}
func (s *CidrSet) getIndexForCIDR(cidr *net.IPNet) (int, error) {
return s.getIndexForIP(cidr.IP)
}
func (s *CidrSet) getIndexForIP(ip net.IP) (int, error) {
if ip.To4() != nil {
cidrIndex := (binary.BigEndian.Uint32(s.clusterIP) ^ binary.BigEndian.Uint32(ip.To4())) >> uint32(32-s.subNetMaskSize)
if cidrIndex >= uint32(s.maxCIDRs) {
return 0, fmt.Errorf("CIDR: %v/%v is out of the range of CIDR allocator", ip, s.subNetMaskSize)
}
return int(cidrIndex), nil
}
if ip.To16() != nil {
bigIP := big.NewInt(0).SetBytes(s.clusterIP)
bigIP = bigIP.Xor(bigIP, big.NewInt(0).SetBytes(ip))
cidrIndexBig := bigIP.Rsh(bigIP, uint(net.IPv6len*8-s.subNetMaskSize))
cidrIndex := cidrIndexBig.Uint64()
if cidrIndex >= uint64(s.maxCIDRs) {
return 0, fmt.Errorf("CIDR: %v/%v is out of the range of CIDR allocator", ip, s.subNetMaskSize)
}
return int(cidrIndex), nil
}
return 0, fmt.Errorf("invalid IP: %v", ip)
}
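
A minimal, self-contained usage sketch of the CidrSet API above; the CIDR values are illustrative:

package main

import (
    "fmt"
    "net"

    "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
)

func main() {
    // Carve /24 blocks out of an example /16 cluster range.
    _, clusterCIDR, _ := net.ParseCIDR("10.1.0.0/16")
    set, err := cidrset.NewCIDRSet(clusterCIDR, 24)
    if err != nil {
        panic(err)
    }

    // Mark a range that is managed elsewhere as used so it is never handed out.
    _, reserved, _ := net.ParseCIDR("10.1.0.0/24")
    if err := set.Occupy(reserved); err != nil {
        panic(err)
    }

    next, err := set.AllocateNext()
    if err != nil {
        panic(err)
    }
    fmt.Println(next) // 10.1.1.0/24, since 10.1.0.0/24 is already occupied

    // Hand the block back so it can be reused.
    if err := set.Release(next); err != nil {
        panic(err)
    }
}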


@@ -0,0 +1,733 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cidrset
import (
"math/big"
"net"
"reflect"
"testing"
"github.com/golang/glog"
)
func TestCIDRSetFullyAllocated(t *testing.T) {
cases := []struct {
clusterCIDRStr string
subNetMaskSize int
expectedCIDR string
description string
}{
{
clusterCIDRStr: "127.123.234.0/30",
subNetMaskSize: 30,
expectedCIDR: "127.123.234.0/30",
description: "Fully allocated CIDR with IPv4",
},
{
clusterCIDRStr: "beef:1234::/30",
subNetMaskSize: 30,
expectedCIDR: "beef:1234::/30",
description: "Fully allocated CIDR with IPv6",
},
}
for _, tc := range cases {
_, clusterCIDR, _ := net.ParseCIDR(tc.clusterCIDRStr)
a, err := NewCIDRSet(clusterCIDR, tc.subNetMaskSize)
if err != nil {
t.Fatalf("unexpected error: %v for %v", err, tc.description)
}
p, err := a.AllocateNext()
if err != nil {
t.Fatalf("unexpected error: %v for %v", err, tc.description)
}
if p.String() != tc.expectedCIDR {
t.Fatalf("unexpected allocated cidr: %v, expecting %v for %v",
p.String(), tc.expectedCIDR, tc.description)
}
_, err = a.AllocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range for %v", tc.description)
}
a.Release(p)
p, err = a.AllocateNext()
if err != nil {
t.Fatalf("unexpected error: %v for %v", err, tc.description)
}
if p.String() != tc.expectedCIDR {
t.Fatalf("unexpected allocated cidr: %v, expecting %v for %v",
p.String(), tc.expectedCIDR, tc.description)
}
_, err = a.AllocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range for %v", tc.description)
}
}
}
func TestIndexToCIDRBlock(t *testing.T) {
cases := []struct {
clusterCIDRStr string
subnetMaskSize int
index int
CIDRBlock string
description string
}{
{
clusterCIDRStr: "127.123.3.0/16",
subnetMaskSize: 24,
index: 0,
CIDRBlock: "127.123.0.0/24",
description: "1st IP address indexed with IPv4",
},
{
clusterCIDRStr: "127.123.0.0/16",
subnetMaskSize: 24,
index: 15,
CIDRBlock: "127.123.15.0/24",
description: "16th IP address indexed with IPv4",
},
{
clusterCIDRStr: "192.168.5.219/28",
subnetMaskSize: 32,
index: 5,
CIDRBlock: "192.168.5.213/32",
description: "5th IP address indexed with IPv4",
},
{
clusterCIDRStr: "2001:0db8:1234:3::/48",
subnetMaskSize: 64,
index: 0,
CIDRBlock: "2001:db8:1234::/64",
description: "1st IP address indexed with IPv6 /64",
},
{
clusterCIDRStr: "2001:0db8:1234::/48",
subnetMaskSize: 64,
index: 15,
CIDRBlock: "2001:db8:1234:f::/64",
description: "16th IP address indexed with IPv6 /64",
},
{
clusterCIDRStr: "2001:0db8:85a3::8a2e:0370:7334/50",
subnetMaskSize: 63,
index: 6425,
CIDRBlock: "2001:db8:85a3:3232::/63",
description: "6426th IP address indexed with IPv6 /63",
},
{
clusterCIDRStr: "2001:0db8::/32",
subnetMaskSize: 48,
index: 0,
CIDRBlock: "2001:db8::/48",
description: "1st IP address indexed with IPv6 /48",
},
{
clusterCIDRStr: "2001:0db8::/32",
subnetMaskSize: 48,
index: 15,
CIDRBlock: "2001:db8:f::/48",
description: "16th IP address indexed with IPv6 /48",
},
{
clusterCIDRStr: "2001:0db8:85a3::8a2e:0370:7334/32",
subnetMaskSize: 48,
index: 6425,
CIDRBlock: "2001:db8:1919::/48",
description: "6426th IP address indexed with IPv6 /48",
},
{
clusterCIDRStr: "2001:0db8:1234:ff00::/56",
subnetMaskSize: 72,
index: 0,
CIDRBlock: "2001:db8:1234:ff00::/72",
description: "1st IP address indexed with IPv6 /72",
},
{
clusterCIDRStr: "2001:0db8:1234:ff00::/56",
subnetMaskSize: 72,
index: 15,
CIDRBlock: "2001:db8:1234:ff00:f00::/72",
description: "16th IP address indexed with IPv6 /72",
},
{
clusterCIDRStr: "2001:0db8:1234:ff00::0370:7334/56",
subnetMaskSize: 72,
index: 6425,
CIDRBlock: "2001:db8:1234:ff19:1900::/72",
description: "6426th IP address indexed with IPv6 /72",
},
{
clusterCIDRStr: "2001:0db8:1234:0:1234::/80",
subnetMaskSize: 96,
index: 0,
CIDRBlock: "2001:db8:1234:0:1234::/96",
description: "1st IP address indexed with IPv6 /96",
},
{
clusterCIDRStr: "2001:0db8:1234:0:1234::/80",
subnetMaskSize: 96,
index: 15,
CIDRBlock: "2001:db8:1234:0:1234:f::/96",
description: "16th IP address indexed with IPv6 /96",
},
{
clusterCIDRStr: "2001:0db8:1234:ff00::0370:7334/80",
subnetMaskSize: 96,
index: 6425,
CIDRBlock: "2001:db8:1234:ff00:0:1919::/96",
description: "6426th IP address indexed with IPv6 /96",
},
}
for _, tc := range cases {
_, clusterCIDR, _ := net.ParseCIDR(tc.clusterCIDRStr)
a, err := NewCIDRSet(clusterCIDR, tc.subnetMaskSize)
if err != nil {
t.Fatalf("error for %v ", tc.description)
}
cidr := a.indexToCIDRBlock(tc.index)
if cidr.String() != tc.CIDRBlock {
t.Fatalf("error for %v index %d %s", tc.description, tc.index, cidr.String())
}
}
}
func TestCIDRSet_RandomishAllocation(t *testing.T) {
cases := []struct {
clusterCIDRStr string
description string
}{
{
clusterCIDRStr: "127.123.234.0/16",
description: "RandomishAllocation with IPv4",
},
{
clusterCIDRStr: "beef:1234::/16",
description: "RandomishAllocation with IPv6",
},
}
for _, tc := range cases {
_, clusterCIDR, _ := net.ParseCIDR(tc.clusterCIDRStr)
a, err := NewCIDRSet(clusterCIDR, 24)
if err != nil {
t.Fatalf("Error allocating CIDRSet for %v", tc.description)
}
// allocate all the CIDRs
var cidrs []*net.IPNet
for i := 0; i < 256; i++ {
if c, err := a.AllocateNext(); err == nil {
cidrs = append(cidrs, c)
} else {
t.Fatalf("unexpected error: %v for %v", err, tc.description)
}
}
//var err error
_, err = a.AllocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range for %v", tc.description)
}
// release them all
for i := 0; i < len(cidrs); i++ {
a.Release(cidrs[i])
}
// allocate the CIDRs again
var rcidrs []*net.IPNet
for i := 0; i < 256; i++ {
if c, err := a.AllocateNext(); err == nil {
rcidrs = append(rcidrs, c)
} else {
t.Fatalf("unexpected error: %d, %v for %v", i, err, tc.description)
}
}
_, err = a.AllocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range for %v", tc.description)
}
if !reflect.DeepEqual(cidrs, rcidrs) {
t.Fatalf("expected re-allocated cidrs are the same collection for %v", tc.description)
}
}
}
func TestCIDRSet_AllocationOccupied(t *testing.T) {
cases := []struct {
clusterCIDRStr string
description string
}{
{
clusterCIDRStr: "127.123.234.0/16",
description: "AllocationOccupied with IPv4",
},
{
clusterCIDRStr: "beef:1234::/16",
description: "AllocationOccupied with IPv6",
},
}
for _, tc := range cases {
_, clusterCIDR, _ := net.ParseCIDR(tc.clusterCIDRStr)
a, err := NewCIDRSet(clusterCIDR, 24)
if err != nil {
t.Fatalf("Error allocating CIDRSet for %v", tc.description)
}
// allocate all the CIDRs
var cidrs []*net.IPNet
var numCIDRs = 256
for i := 0; i < numCIDRs; i++ {
if c, err := a.AllocateNext(); err == nil {
cidrs = append(cidrs, c)
} else {
t.Fatalf("unexpected error: %v for %v", err, tc.description)
}
}
//var err error
_, err = a.AllocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range for %v", tc.description)
}
// release them all
for i := 0; i < len(cidrs); i++ {
a.Release(cidrs[i])
}
// occupy the last 128 CIDRs
for i := numCIDRs / 2; i < numCIDRs; i++ {
a.Occupy(cidrs[i])
}
// allocate the first 128 CIDRs again
var rcidrs []*net.IPNet
for i := 0; i < numCIDRs/2; i++ {
if c, err := a.AllocateNext(); err == nil {
rcidrs = append(rcidrs, c)
} else {
t.Fatalf("unexpected error: %d, %v for %v", i, err, tc.description)
}
}
_, err = a.AllocateNext()
if err == nil {
t.Fatalf("expected error because of fully-allocated range for %v", tc.description)
}
// check Occupy() work properly
for i := numCIDRs / 2; i < numCIDRs; i++ {
rcidrs = append(rcidrs, cidrs[i])
}
if !reflect.DeepEqual(cidrs, rcidrs) {
t.Fatalf("expected re-allocated cidrs are the same collection for %v", tc.description)
}
}
}
func TestGetBitforCIDR(t *testing.T) {
cases := []struct {
clusterCIDRStr string
subNetMaskSize int
subNetCIDRStr string
expectedBit int
expectErr bool
description string
}{
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.0.0.0/16",
expectedBit: 0,
expectErr: false,
description: "Get 0 Bit with IPv4",
},
{
clusterCIDRStr: "be00::/8",
subNetMaskSize: 16,
subNetCIDRStr: "be00::/16",
expectedBit: 0,
expectErr: false,
description: "Get 0 Bit with IPv6",
},
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.123.0.0/16",
expectedBit: 123,
expectErr: false,
description: "Get 123rd Bit with IPv4",
},
{
clusterCIDRStr: "be00::/8",
subNetMaskSize: 16,
subNetCIDRStr: "beef::/16",
expectedBit: 0xef,
expectErr: false,
description: "Get xef Bit with IPv6",
},
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.168.0.0/16",
expectedBit: 168,
expectErr: false,
description: "Get 168th Bit with IPv4",
},
{
clusterCIDRStr: "be00::/8",
subNetMaskSize: 16,
subNetCIDRStr: "be68::/16",
expectedBit: 0x68,
expectErr: false,
description: "Get x68th Bit with IPv6",
},
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.224.0.0/16",
expectedBit: 224,
expectErr: false,
description: "Get 224th Bit with IPv4",
},
{
clusterCIDRStr: "be00::/8",
subNetMaskSize: 16,
subNetCIDRStr: "be24::/16",
expectedBit: 0x24,
expectErr: false,
description: "Get x24th Bit with IPv6",
},
{
clusterCIDRStr: "192.168.0.0/16",
subNetMaskSize: 24,
subNetCIDRStr: "192.168.12.0/24",
expectedBit: 12,
expectErr: false,
description: "Get 12th Bit with IPv4",
},
{
clusterCIDRStr: "beef::/16",
subNetMaskSize: 24,
subNetCIDRStr: "beef:1200::/24",
expectedBit: 0x12,
expectErr: false,
description: "Get x12th Bit with IPv6",
},
{
clusterCIDRStr: "192.168.0.0/16",
subNetMaskSize: 24,
subNetCIDRStr: "192.168.151.0/24",
expectedBit: 151,
expectErr: false,
description: "Get 151st Bit with IPv4",
},
{
clusterCIDRStr: "beef::/16",
subNetMaskSize: 24,
subNetCIDRStr: "beef:9700::/24",
expectedBit: 0x97,
expectErr: false,
description: "Get x97st Bit with IPv6",
},
{
clusterCIDRStr: "192.168.0.0/16",
subNetMaskSize: 24,
subNetCIDRStr: "127.168.224.0/24",
expectErr: true,
description: "Get error with IPv4",
},
{
clusterCIDRStr: "beef::/16",
subNetMaskSize: 24,
subNetCIDRStr: "2001:db00::/24",
expectErr: true,
description: "Get error with IPv6",
},
}
for _, tc := range cases {
_, clusterCIDR, err := net.ParseCIDR(tc.clusterCIDRStr)
if err != nil {
t.Fatalf("unexpected error: %v for %v", err, tc.description)
}
cs, err := NewCIDRSet(clusterCIDR, tc.subNetMaskSize)
if err != nil {
t.Fatalf("Error allocating CIDRSet for %v", tc.description)
}
_, subnetCIDR, err := net.ParseCIDR(tc.subNetCIDRStr)
if err != nil {
t.Fatalf("unexpected error: %v for %v", err, tc.description)
}
got, err := cs.getIndexForCIDR(subnetCIDR)
if err == nil && tc.expectErr {
glog.Errorf("expected error but got null for %v", tc.description)
continue
}
if err != nil && !tc.expectErr {
glog.Errorf("unexpected error: %v for %v", err, tc.description)
continue
}
if got != tc.expectedBit {
glog.Errorf("expected %v, but got %v for %v", tc.expectedBit, got, tc.description)
}
}
}
func TestOccupy(t *testing.T) {
cases := []struct {
clusterCIDRStr string
subNetMaskSize int
subNetCIDRStr string
expectedUsedBegin int
expectedUsedEnd int
expectErr bool
description string
}{
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.0.0.0/8",
expectedUsedBegin: 0,
expectedUsedEnd: 255,
expectErr: false,
description: "Occupy all Bits with IPv4",
},
{
clusterCIDRStr: "2001:beef:1200::/40",
subNetMaskSize: 48,
subNetCIDRStr: "2001:beef:1200::/40",
expectedUsedBegin: 0,
expectedUsedEnd: 255,
expectErr: false,
description: "Occupy all Bits with IPv6",
},
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.0.0.0/2",
expectedUsedBegin: 0,
expectedUsedEnd: 255,
expectErr: false,
description: "Occupy every Bit with IPv4",
},
{
clusterCIDRStr: "2001:beef:1200::/40",
subNetMaskSize: 48,
subNetCIDRStr: "2001:beef:1234::/34",
expectedUsedBegin: 0,
expectedUsedEnd: 255,
expectErr: false,
description: "Occupy every Bit with IPv6",
},
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 16,
subNetCIDRStr: "127.0.0.0/16",
expectedUsedBegin: 0,
expectedUsedEnd: 0,
expectErr: false,
description: "Occupy 1st Bit with IPv4",
},
{
clusterCIDRStr: "2001:beef:1200::/40",
subNetMaskSize: 48,
subNetCIDRStr: "2001:beef:1200::/48",
expectedUsedBegin: 0,
expectedUsedEnd: 0,
expectErr: false,
description: "Occupy 1st Bit with IPv6",
},
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 32,
subNetCIDRStr: "127.0.0.0/16",
expectedUsedBegin: 0,
expectedUsedEnd: 65535,
expectErr: false,
description: "Occupy 65535 Bits with IPv4",
},
{
clusterCIDRStr: "2001:beef:1200::/48",
subNetMaskSize: 64,
subNetCIDRStr: "2001:beef:1200::/48",
expectedUsedBegin: 0,
expectedUsedEnd: 65535,
expectErr: false,
description: "Occupy 65535 Bits with IPv6",
},
{
clusterCIDRStr: "127.0.0.0/7",
subNetMaskSize: 16,
subNetCIDRStr: "127.0.0.0/15",
expectedUsedBegin: 256,
expectedUsedEnd: 257,
expectErr: false,
description: "Occupy 257th Bit with IPv4",
},
{
clusterCIDRStr: "2001:beef:7f00::/39",
subNetMaskSize: 48,
subNetCIDRStr: "2001:beef:7f00::/47",
expectedUsedBegin: 256,
expectedUsedEnd: 257,
expectErr: false,
description: "Occupy 257th Bit with IPv6",
},
{
clusterCIDRStr: "127.0.0.0/7",
subNetMaskSize: 15,
subNetCIDRStr: "127.0.0.0/15",
expectedUsedBegin: 128,
expectedUsedEnd: 128,
expectErr: false,
description: "Occupy 128th Bit with IPv4",
},
{
clusterCIDRStr: "2001:beef:7f00::/39",
subNetMaskSize: 47,
subNetCIDRStr: "2001:beef:7f00::/47",
expectedUsedBegin: 128,
expectedUsedEnd: 128,
expectErr: false,
description: "Occupy 128th Bit with IPv6",
},
{
clusterCIDRStr: "127.0.0.0/7",
subNetMaskSize: 18,
subNetCIDRStr: "127.0.0.0/15",
expectedUsedBegin: 1024,
expectedUsedEnd: 1031,
expectErr: false,
description: "Occupy 1031st Bit with IPv4",
},
{
clusterCIDRStr: "2001:beef:7f00::/39",
subNetMaskSize: 50,
subNetCIDRStr: "2001:beef:7f00::/47",
expectedUsedBegin: 1024,
expectedUsedEnd: 1031,
expectErr: false,
description: "Occupy 1031st Bit with IPv6",
},
}
for _, tc := range cases {
_, clusterCIDR, err := net.ParseCIDR(tc.clusterCIDRStr)
if err != nil {
t.Fatalf("unexpected error: %v for %v", err, tc.description)
}
cs, err := NewCIDRSet(clusterCIDR, tc.subNetMaskSize)
if err != nil {
t.Fatalf("Error allocating CIDRSet for %v", tc.description)
}
_, subnetCIDR, err := net.ParseCIDR(tc.subNetCIDRStr)
if err != nil {
t.Fatalf("unexpected error: %v for %v", err, tc.description)
}
err = cs.Occupy(subnetCIDR)
if err == nil && tc.expectErr {
t.Errorf("expected error but got none for %v", tc.description)
continue
}
if err != nil && !tc.expectErr {
t.Errorf("unexpected error: %v for %v", err, tc.description)
continue
}
expectedUsed := big.Int{}
for i := tc.expectedUsedBegin; i <= tc.expectedUsedEnd; i++ {
expectedUsed.SetBit(&expectedUsed, i, 1)
}
if expectedUsed.Cmp(&cs.used) != 0 {
t.Errorf("error for %v", tc.description)
}
}
}
func TestCIDRSetv6(t *testing.T) {
cases := []struct {
clusterCIDRStr string
subNetMaskSize int
expectedCIDR string
expectedCIDR2 string
expectErr bool
description string
}{
{
clusterCIDRStr: "127.0.0.0/8",
subNetMaskSize: 32,
expectErr: false,
expectedCIDR: "127.0.0.0/32",
expectedCIDR2: "127.0.0.1/32",
description: "Max cluster subnet size with IPv4",
},
{
clusterCIDRStr: "beef:1234::/32",
subNetMaskSize: 49,
expectErr: true,
description: "Max cluster subnet size with IPv6",
},
{
clusterCIDRStr: "2001:beef:1234:369b::/60",
subNetMaskSize: 64,
expectedCIDR: "2001:beef:1234:3690::/64",
expectedCIDR2: "2001:beef:1234:3691::/64",
expectErr: false,
description: "Allocate a few IPv6",
},
}
for _, tc := range cases {
_, clusterCIDR, _ := net.ParseCIDR(tc.clusterCIDRStr)
a, err := NewCIDRSet(clusterCIDR, tc.subNetMaskSize)
if err != nil {
if tc.expectErr {
continue
}
t.Fatalf("Error allocating CIDRSet for %v", tc.description)
}
p, err := a.AllocateNext()
if err == nil && tc.expectErr {
t.Errorf("expected error but got none for %v", tc.description)
continue
}
if err != nil && !tc.expectErr {
t.Errorf("unexpected error: %v for %v", err, tc.description)
continue
}
if !tc.expectErr {
if p.String() != tc.expectedCIDR {
t.Fatalf("unexpected allocated cidr: %s for %v", p.String(), tc.description)
}
}
p2, err := a.AllocateNext()
if !tc.expectErr {
if p2.String() != tc.expectedCIDR2 {
t.Fatalf("unexpected allocated cidr: %s for %v", p2.String(), tc.description)
}
}
}
}


@@ -0,0 +1,259 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipam
import (
"fmt"
"net"
"sync"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
informers "k8s.io/client-go/informers/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
v1node "k8s.io/kubernetes/pkg/api/v1/node"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/controller"
nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
utilnode "k8s.io/kubernetes/pkg/util/node"
)
// cloudCIDRAllocator allocates node CIDRs according to IP address aliases
// assigned by the cloud provider. In this case, the allocation and
// deallocation is delegated to the external provider, and the controller
// merely takes the assignment and updates the node spec.
type cloudCIDRAllocator struct {
client clientset.Interface
cloud *gce.GCECloud
// nodeLister is able to list/get nodes and is populated by the shared informer passed to
// NewCloudCIDRAllocator.
nodeLister corelisters.NodeLister
// nodesSynced returns true if the node shared informer has been synced at least once.
nodesSynced cache.InformerSynced
// Channel that is used to pass updating Nodes to the background.
// This increases the throughput of CIDR assignment by parallelization
// and not blocking on long operations (which shouldn't be done from
// event handlers anyway).
nodeUpdateChannel chan string
recorder record.EventRecorder
// Keep a set of nodes that are currently being processed to avoid races in CIDR allocation
lock sync.Mutex
nodesInProcessing sets.String
}
var _ CIDRAllocator = (*cloudCIDRAllocator)(nil)
// NewCloudCIDRAllocator creates a new cloud CIDR allocator.
func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer) (CIDRAllocator, error) {
if client == nil {
glog.Fatalf("kubeClient is nil when starting NodeController")
}
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
eventBroadcaster.StartLogging(glog.Infof)
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.CoreV1().RESTClient()).Events("")})
gceCloud, ok := cloud.(*gce.GCECloud)
if !ok {
err := fmt.Errorf("cloudCIDRAllocator does not support %v provider", cloud.ProviderName())
return nil, err
}
ca := &cloudCIDRAllocator{
client: client,
cloud: gceCloud,
nodeLister: nodeInformer.Lister(),
nodesSynced: nodeInformer.Informer().HasSynced,
nodeUpdateChannel: make(chan string, cidrUpdateQueueSize),
recorder: recorder,
nodesInProcessing: sets.NewString(),
}
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: nodeutil.CreateAddNodeHandler(ca.AllocateOrOccupyCIDR),
UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
if newNode.Spec.PodCIDR == "" {
return ca.AllocateOrOccupyCIDR(newNode)
}
// Even if the PodCIDR is already assigned, the node still needs to be
// processed when its NetworkUnavailable condition is missing or not
// false, so that the condition can be set.
_, cond := v1node.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable)
if cond == nil || cond.Status != v1.ConditionFalse {
return ca.AllocateOrOccupyCIDR(newNode)
}
return nil
}),
DeleteFunc: nodeutil.CreateDeleteNodeHandler(ca.ReleaseCIDR),
})
glog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName())
return ca, nil
}
func (ca *cloudCIDRAllocator) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.Infof("Starting cloud CIDR allocator")
defer glog.Infof("Shutting down cloud CIDR allocator")
if !controller.WaitForCacheSync("cidrallocator", stopCh, ca.nodesSynced) {
return
}
for i := 0; i < cidrUpdateWorkers; i++ {
go ca.worker(stopCh)
}
<-stopCh
}
func (ca *cloudCIDRAllocator) worker(stopChan <-chan struct{}) {
for {
select {
case workItem, ok := <-ca.nodeUpdateChannel:
if !ok {
glog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed")
return
}
if err := ca.updateCIDRAllocation(workItem); err != nil {
// Requeue the failed node for update again.
ca.nodeUpdateChannel <- workItem
}
case <-stopChan:
return
}
}
}
func (ca *cloudCIDRAllocator) insertNodeToProcessing(nodeName string) bool {
ca.lock.Lock()
defer ca.lock.Unlock()
if ca.nodesInProcessing.Has(nodeName) {
return false
}
ca.nodesInProcessing.Insert(nodeName)
return true
}
func (ca *cloudCIDRAllocator) removeNodeFromProcessing(nodeName string) {
ca.lock.Lock()
defer ca.lock.Unlock()
ca.nodesInProcessing.Delete(nodeName)
}
// WARNING: If you're adding any return paths or deferring any more work from this
// function, make sure to update nodesInProcessing properly with the disposition of
// the node when the work is done.
func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
if node == nil {
return nil
}
if !ca.insertNodeToProcessing(node.Name) {
glog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
return nil
}
glog.V(4).Infof("Putting node %s into the work queue", node.Name)
ca.nodeUpdateChannel <- node.Name
return nil
}
// updateCIDRAllocation assigns CIDR to Node and sends an update to the API server.
func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
defer ca.removeNodeFromProcessing(nodeName)
node, err := ca.nodeLister.Get(nodeName)
if err != nil {
glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err)
return err
}
cidrs, err := ca.cloud.AliasRanges(types.NodeName(nodeName))
if err != nil {
nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
return fmt.Errorf("failed to allocate cidr: %v", err)
}
if len(cidrs) == 0 {
nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
return fmt.Errorf("failed to allocate cidr: Node %v has no CIDRs", node.Name)
}
_, cidr, err := net.ParseCIDR(cidrs[0])
if err != nil {
return fmt.Errorf("failed to parse string '%s' as a CIDR: %v", cidrs[0], err)
}
podCIDR := cidr.String()
if node.Spec.PodCIDR == podCIDR {
glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
// We don't return here, in order to set the NetworkUnavailable condition later below.
} else {
if node.Spec.PodCIDR != "" {
glog.Errorf("PodCIDR being reassigned! Node %v spec has %v, but cloud provider has assigned %v", node.Name, node.Spec.PodCIDR, podCIDR)
// We fall through and set the CIDR despite this error. This
// implements the same logic as implemented in the
// rangeAllocator.
//
// See https://github.com/kubernetes/kubernetes/pull/42147#discussion_r103357248
}
for i := 0; i < cidrUpdateRetries; i++ {
if err = utilnode.PatchNodeCIDR(ca.client, types.NodeName(node.Name), podCIDR); err == nil {
glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
break
}
}
}
if err != nil {
nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed")
glog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
return err
}
err = utilnode.SetNodeCondition(ca.client, types.NodeName(node.Name), v1.NodeCondition{
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionFalse,
Reason: "RouteCreated",
Message: "NodeController create implicit route",
LastTransitionTime: metav1.Now(),
})
if err != nil {
glog.Errorf("Error setting route status for node %v: %v", node.Name, err)
}
return err
}
func (ca *cloudCIDRAllocator) ReleaseCIDR(node *v1.Node) error {
glog.V(2).Infof("Node %v PodCIDR (%v) will be released by external cloud provider (not managed by controller)",
node.Name, node.Spec.PodCIDR)
return nil
}
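
A minimal sketch of wiring the cloud CIDR allocator above to a shared informer factory; the client and cloud provider are assumed to come from the caller, and the resync period is illustrative:

package example

import (
    "time"

    "k8s.io/client-go/informers"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/pkg/cloudprovider"
    "k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
)

// runCloudAllocator is a hypothetical helper: it builds a node informer, constructs the
// cloud CIDR allocator, and runs both until stopCh is closed.
func runCloudAllocator(client clientset.Interface, cloud cloudprovider.Interface, stopCh <-chan struct{}) error {
    factory := informers.NewSharedInformerFactory(client, 30*time.Second)
    nodeInformer := factory.Core().V1().Nodes()

    alloc, err := ipam.NewCloudCIDRAllocator(client, cloud, nodeInformer)
    if err != nil {
        return err
    }

    factory.Start(stopCh)
    go alloc.Run(stopCh)
    return nil
}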


@@ -0,0 +1,215 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipam
import (
"fmt"
"net"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
informers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
nodesync "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync"
nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
)
// Config for the IPAM controller.
type Config struct {
// Resync is the default timeout duration when there are no errors.
Resync time.Duration
// MaxBackoff is the maximum timeout when in an error backoff state.
MaxBackoff time.Duration
// InitialRetry is the initial retry interval when an error is reported.
InitialRetry time.Duration
// Mode to use to synchronize.
Mode nodesync.NodeSyncMode
}
// Controller is the controller for synchronizing cluster and cloud node
// pod CIDR range assignments.
type Controller struct {
config *Config
adapter *adapter
lock sync.Mutex
syncers map[string]*nodesync.NodeSync
set *cidrset.CidrSet
}
// NewController returns a new instance of the IPAM controller.
func NewController(
config *Config,
kubeClient clientset.Interface,
cloud cloudprovider.Interface,
clusterCIDR, serviceCIDR *net.IPNet,
nodeCIDRMaskSize int) (*Controller, error) {
if !nodesync.IsValidMode(config.Mode) {
return nil, fmt.Errorf("invalid IPAM controller mode %q", config.Mode)
}
gceCloud, ok := cloud.(*gce.GCECloud)
if !ok {
return nil, fmt.Errorf("cloud IPAM controller does not support %q provider", cloud.ProviderName())
}
set, err := cidrset.NewCIDRSet(clusterCIDR, nodeCIDRMaskSize)
if err != nil {
return nil, err
}
c := &Controller{
config: config,
adapter: newAdapter(kubeClient, gceCloud),
syncers: make(map[string]*nodesync.NodeSync),
set: set,
}
if err := occupyServiceCIDR(c.set, clusterCIDR, serviceCIDR); err != nil {
return nil, err
}
return c, nil
}
// Start initializes the Controller with the existing list of nodes and
// registers the informers for node changes. This will start synchronization
// of the node and cloud CIDR range allocations.
func (c *Controller) Start(nodeInformer informers.NodeInformer) error {
glog.V(0).Infof("Starting IPAM controller (config=%+v)", c.config)
nodes, err := listNodes(c.adapter.k8s)
if err != nil {
return err
}
for _, node := range nodes.Items {
if node.Spec.PodCIDR != "" {
_, cidrRange, err := net.ParseCIDR(node.Spec.PodCIDR)
if err == nil {
c.set.Occupy(cidrRange)
glog.V(3).Infof("Occupying CIDR for node %q (%v)", node.Name, node.Spec.PodCIDR)
} else {
glog.Errorf("Node %q has an invalid CIDR (%q): %v", node.Name, node.Spec.PodCIDR, err)
}
}
func() {
c.lock.Lock()
defer c.lock.Unlock()
// XXX/bowei -- stagger the start of each sync cycle.
syncer := c.newSyncer(node.Name)
c.syncers[node.Name] = syncer
go syncer.Loop(nil)
}()
}
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: nodeutil.CreateAddNodeHandler(c.onAdd),
UpdateFunc: nodeutil.CreateUpdateNodeHandler(c.onUpdate),
DeleteFunc: nodeutil.CreateDeleteNodeHandler(c.onDelete),
})
return nil
}
// occupyServiceCIDR marks the service CIDR range as used in the cluster CIDR
// set if the two ranges intersect.
func occupyServiceCIDR(set *cidrset.CidrSet, clusterCIDR, serviceCIDR *net.IPNet) error {
if clusterCIDR.Contains(serviceCIDR.IP) || serviceCIDR.Contains(clusterCIDR.IP) {
if err := set.Occupy(serviceCIDR); err != nil {
return err
}
}
return nil
}
type nodeState struct {
t Timeout
}
func (ns *nodeState) ReportResult(err error) {
ns.t.Update(err == nil)
}
func (ns *nodeState) ResyncTimeout() time.Duration {
return ns.t.Next()
}
func (c *Controller) newSyncer(name string) *nodesync.NodeSync {
ns := &nodeState{
Timeout{
Resync: c.config.Resync,
MaxBackoff: c.config.MaxBackoff,
InitialRetry: c.config.InitialRetry,
},
}
return nodesync.New(ns, c.adapter, c.adapter, c.config.Mode, name, c.set)
}
func (c *Controller) onAdd(node *v1.Node) error {
c.lock.Lock()
defer c.lock.Unlock()
if syncer, ok := c.syncers[node.Name]; !ok {
syncer = c.newSyncer(node.Name)
c.syncers[node.Name] = syncer
go syncer.Loop(nil)
} else {
glog.Warningf("Add for node %q that already exists", node.Name)
syncer.Update(node)
}
return nil
}
func (c *Controller) onUpdate(_, node *v1.Node) error {
c.lock.Lock()
defer c.lock.Unlock()
if sync, ok := c.syncers[node.Name]; ok {
sync.Update(node)
} else {
glog.Errorf("Received update for non-existent node %q", node.Name)
return fmt.Errorf("unknown node %q", node.Name)
}
return nil
}
func (c *Controller) onDelete(node *v1.Node) error {
c.lock.Lock()
defer c.lock.Unlock()
if syncer, ok := c.syncers[node.Name]; ok {
syncer.Delete(node)
delete(c.syncers, node.Name)
} else {
glog.Warning("Node %q was already deleted", node.Name)
}
return nil
}
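
A minimal sketch of constructing and starting the IPAM controller above; the client, cloud provider, CIDRs, and node informer are assumed to come from the caller, and nodesync.SyncFromCluster is an assumed constant name from the sync package, which is not shown here:

package example

import (
    "net"
    "time"

    informers "k8s.io/client-go/informers/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/pkg/cloudprovider"
    "k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
    nodesync "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync"
)

// startIPAMController is a hypothetical helper showing the intended call sequence:
// build a Config, construct the controller, then Start it with a node informer.
func startIPAMController(client clientset.Interface, cloud cloudprovider.Interface,
    nodeInformer informers.NodeInformer, clusterCIDR, serviceCIDR *net.IPNet) error {
    cfg := &ipam.Config{
        Resync:       30 * time.Second,        // steady-state resync period (illustrative)
        MaxBackoff:   2 * time.Minute,         // cap for the error backoff (illustrative)
        InitialRetry: time.Second,             // first retry interval after an error (illustrative)
        Mode:         nodesync.SyncFromCluster, // assumed mode constant from the sync package
    }
    c, err := ipam.NewController(cfg, client, cloud, clusterCIDR, serviceCIDR, 24)
    if err != nil {
        return err
    }
    return c.Start(nodeInformer)
}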


@@ -0,0 +1,67 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipam
import (
"net"
"testing"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"
)
func TestOccupyServiceCIDR(t *testing.T) {
const clusterCIDR = "10.1.0.0/16"
TestCase:
for _, tc := range []struct {
serviceCIDR string
}{
{"10.0.255.0/24"},
{"10.1.0.0/24"},
{"10.1.255.0/24"},
{"10.2.0.0/24"},
} {
serviceCIDR := test.MustParseCIDR(tc.serviceCIDR)
set, err := cidrset.NewCIDRSet(test.MustParseCIDR(clusterCIDR), 24)
if err != nil {
t.Errorf("test case %+v: NewCIDRSet() = %v, want nil", tc, err)
}
if err := occupyServiceCIDR(set, test.MustParseCIDR(clusterCIDR), serviceCIDR); err != nil {
t.Errorf("test case %+v: occupyServiceCIDR() = %v, want nil", tc, err)
}
// Allocate until full.
var cidrs []*net.IPNet
for {
cidr, err := set.AllocateNext()
if err != nil {
if err == cidrset.ErrCIDRRangeNoCIDRsRemaining {
break
}
t.Errorf("set.AllocateNext() = %v, want %v", err, cidrset.ErrCIDRRangeNoCIDRsRemaining)
continue TestCase
}
cidrs = append(cidrs, cidr)
}
// No allocated CIDR range should intersect with serviceCIDR.
for _, c := range cidrs {
if c.Contains(serviceCIDR.IP) || serviceCIDR.Contains(c.IP) {
t.Errorf("test case %+v: allocated CIDR %v from service range", tc, c)
}
}
}
}


@@ -0,0 +1,30 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package ipam provides different allocators for assigning IP ranges to nodes.
// We currently support several kinds of IPAM allocators (these are denoted by
// the CIDRAllocatorType):
// - RangeAllocator is an allocator that assigns PodCIDRs to nodes and works
// in conjunction with the RouteController to configure the network to get
// connectivity.
// - CloudAllocator is an allocator that synchronizes PodCIDRs from IP
// ranges assignments from the underlying cloud platform.
// - (Alpha only) IPAMFromCluster is an allocator that has similar
// functionality to the RangeAllocator but also synchronizes cluster-managed
// ranges into the cloud platform.
// - (Alpha only) IPAMFromCloud is the same as CloudAllocator (it synchronizes
// from the cloud into the cluster).
package ipam
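
As a small illustration of the allocator types documented above, a hypothetical validation helper that checks a configured value against the exported constants:

package example

import "k8s.io/kubernetes/pkg/controller/nodeipam/ipam"

// isValidAllocatorType is a hypothetical helper, not part of the package; it checks a
// configured value against the allocator types listed in the package documentation.
func isValidAllocatorType(t ipam.CIDRAllocatorType) bool {
    switch t {
    case ipam.RangeAllocatorType, ipam.CloudAllocatorType,
        ipam.IPAMFromClusterAllocatorType, ipam.IPAMFromCloudAllocatorType:
        return true
    default:
        return false
    }
}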


@@ -0,0 +1,326 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipam
import (
"fmt"
"net"
"sync"
"github.com/golang/glog"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
informers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
utilnode "k8s.io/kubernetes/pkg/util/node"
)
type rangeAllocator struct {
client clientset.Interface
cidrs *cidrset.CidrSet
clusterCIDR *net.IPNet
maxCIDRs int
// nodeLister is able to list/get nodes and is populated by the shared informer passed to
// NewCIDRRangeAllocator.
nodeLister corelisters.NodeLister
// nodesSynced returns true if the node shared informer has been synced at least once.
nodesSynced cache.InformerSynced
// Channel that is used to pass updating Nodes with assigned CIDRs to the background.
// This increases the throughput of CIDR assignment by not blocking on long operations.
nodeCIDRUpdateChannel chan nodeAndCIDR
recorder record.EventRecorder
// Keep a set of nodes that are currently being processed to avoid races in CIDR allocation
lock sync.Mutex
nodesInProcessing sets.String
}
// NewCIDRRangeAllocator returns a CIDRAllocator that allocates CIDRs for nodes.
// Caller must ensure subNetMaskSize is not less than cluster CIDR mask size.
// Caller must always pass in a list of existing nodes so the new allocator
// can initialize its CIDR map. NodeList is only nil in testing.
func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.NodeInformer, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
if client == nil {
glog.Fatalf("kubeClient is nil when starting NodeController")
}
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
eventBroadcaster.StartLogging(glog.Infof)
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.CoreV1().RESTClient()).Events("")})
set, err := cidrset.NewCIDRSet(clusterCIDR, subNetMaskSize)
if err != nil {
return nil, err
}
ra := &rangeAllocator{
client: client,
cidrs: set,
clusterCIDR: clusterCIDR,
nodeLister: nodeInformer.Lister(),
nodesSynced: nodeInformer.Informer().HasSynced,
nodeCIDRUpdateChannel: make(chan nodeAndCIDR, cidrUpdateQueueSize),
recorder: recorder,
nodesInProcessing: sets.NewString(),
}
if serviceCIDR != nil {
ra.filterOutServiceRange(serviceCIDR)
} else {
glog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.")
}
if nodeList != nil {
for _, node := range nodeList.Items {
if node.Spec.PodCIDR == "" {
glog.Infof("Node %v has no CIDR, ignoring", node.Name)
continue
} else {
glog.Infof("Node %v has CIDR %s, occupying it in CIDR map",
node.Name, node.Spec.PodCIDR)
}
if err := ra.occupyCIDR(&node); err != nil {
// This will happen if:
// 1. We find garbage in the podCIDR field. Retrying is useless.
// 2. CIDR out of range: This means a node CIDR has changed.
// This error will keep crashing controller-manager.
return nil, err
}
}
}
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: nodeutil.CreateAddNodeHandler(ra.AllocateOrOccupyCIDR),
UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
// If the PodCIDR is not empty we either:
// - already processed a Node that already had a CIDR after NC restarted
// (cidr is marked as used),
// - already processed a Node successfully and allocated a CIDR for it
// (cidr is marked as used),
// - already processed a Node but saw a "timeout" response and the
// request eventually got through; in this case we haven't released
// the allocated CIDR (cidr is still marked as used).
// There's a possible error here:
// - NC sees a new Node and assigns a CIDR X to it,
// - Update Node call fails with a timeout,
// - Node is updated by some other component, NC sees an update and
// assigns CIDR Y to the Node,
// - Both CIDR X and CIDR Y are marked as used in the local cache,
// even though the Node sees only CIDR Y.
// The problem here is that the in-memory cache marks CIDR X as used,
// which prevents it from being assigned to any new node; the cluster
// state itself is correct.
// Restart of NC fixes the issue.
if newNode.Spec.PodCIDR == "" {
return ra.AllocateOrOccupyCIDR(newNode)
}
return nil
}),
DeleteFunc: nodeutil.CreateDeleteNodeHandler(ra.ReleaseCIDR),
})
return ra, nil
}
func (r *rangeAllocator) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.Infof("Starting range CIDR allocator")
defer glog.Infof("Shutting down range CIDR allocator")
if !controller.WaitForCacheSync("cidrallocator", stopCh, r.nodesSynced) {
return
}
for i := 0; i < cidrUpdateWorkers; i++ {
go r.worker(stopCh)
}
<-stopCh
}
func (r *rangeAllocator) worker(stopChan <-chan struct{}) {
for {
select {
case workItem, ok := <-r.nodeCIDRUpdateChannel:
if !ok {
glog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed")
return
}
if err := r.updateCIDRAllocation(workItem); err != nil {
// Requeue the failed node for update again.
r.nodeCIDRUpdateChannel <- workItem
}
case <-stopChan:
return
}
}
}
func (r *rangeAllocator) insertNodeToProcessing(nodeName string) bool {
r.lock.Lock()
defer r.lock.Unlock()
if r.nodesInProcessing.Has(nodeName) {
return false
}
r.nodesInProcessing.Insert(nodeName)
return true
}
func (r *rangeAllocator) removeNodeFromProcessing(nodeName string) {
r.lock.Lock()
defer r.lock.Unlock()
r.nodesInProcessing.Delete(nodeName)
}
func (r *rangeAllocator) occupyCIDR(node *v1.Node) error {
defer r.removeNodeFromProcessing(node.Name)
if node.Spec.PodCIDR == "" {
return nil
}
_, podCIDR, err := net.ParseCIDR(node.Spec.PodCIDR)
if err != nil {
return fmt.Errorf("failed to parse node %s, CIDR %s", node.Name, node.Spec.PodCIDR)
}
if err := r.cidrs.Occupy(podCIDR); err != nil {
return fmt.Errorf("failed to mark cidr as occupied: %v", err)
}
return nil
}
// WARNING: If you're adding any return calls or defer any more work from this
// function you have to make sure to update nodesInProcessing properly with the
// disposition of the node when the work is done.
func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
if node == nil {
return nil
}
if !r.insertNodeToProcessing(node.Name) {
glog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
return nil
}
if node.Spec.PodCIDR != "" {
return r.occupyCIDR(node)
}
podCIDR, err := r.cidrs.AllocateNext()
if err != nil {
r.removeNodeFromProcessing(node.Name)
nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRNotAvailable")
return fmt.Errorf("failed to allocate cidr: %v", err)
}
glog.V(4).Infof("Putting node %s with CIDR %s into the work queue", node.Name, podCIDR)
r.nodeCIDRUpdateChannel <- nodeAndCIDR{
nodeName: node.Name,
cidr: podCIDR,
}
return nil
}
func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
if node == nil || node.Spec.PodCIDR == "" {
return nil
}
_, podCIDR, err := net.ParseCIDR(node.Spec.PodCIDR)
if err != nil {
return fmt.Errorf("Failed to parse CIDR %s on Node %v: %v", node.Spec.PodCIDR, node.Name, err)
}
glog.V(4).Infof("release CIDR %s", node.Spec.PodCIDR)
if err = r.cidrs.Release(podCIDR); err != nil {
return fmt.Errorf("Error when releasing CIDR %v: %v", node.Spec.PodCIDR, err)
}
return err
}
// Marks all CIDRs with subNetMaskSize that belong to serviceCIDR as used,
// so that they won't be assignable.
func (r *rangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) {
// Checks if service CIDR has a nonempty intersection with cluster
// CIDR. It is the case if either clusterCIDR contains serviceCIDR with
// clusterCIDR's Mask applied (this means that clusterCIDR contains
// serviceCIDR) or vice versa (which means that serviceCIDR contains
// clusterCIDR).
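// For example (illustrative values, not taken from this package): with
// clusterCIDR 10.244.0.0/16 and serviceCIDR 10.244.0.0/20, masking the
// service IP with the /16 cluster mask yields 10.244.0.0, which clusterCIDR
// contains, so the whole service range is occupied below and never handed
// out to nodes.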
if !r.clusterCIDR.Contains(serviceCIDR.IP.Mask(r.clusterCIDR.Mask)) && !serviceCIDR.Contains(r.clusterCIDR.IP.Mask(serviceCIDR.Mask)) {
return
}
if err := r.cidrs.Occupy(serviceCIDR); err != nil {
glog.Errorf("Error filtering out service cidr %v: %v", serviceCIDR, err)
}
}
// updateCIDRAllocation assigns CIDR to Node and sends an update to the API server.
func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
var err error
var node *v1.Node
defer r.removeNodeFromProcessing(data.nodeName)
podCIDR := data.cidr.String()
node, err = r.nodeLister.Get(data.nodeName)
if err != nil {
glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", data.nodeName, err)
return err
}
if node.Spec.PodCIDR == podCIDR {
glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
return nil
}
if node.Spec.PodCIDR != "" {
glog.Errorf("Node %v already has a CIDR allocated %v. Releasing the new one %v.", node.Name, node.Spec.PodCIDR, podCIDR)
if err := r.cidrs.Release(data.cidr); err != nil {
glog.Errorf("Error when releasing CIDR %v", podCIDR)
}
return nil
}
// If we reached here, it means that the node has no CIDR currently assigned. So we set it.
for i := 0; i < cidrUpdateRetries; i++ {
if err = utilnode.PatchNodeCIDR(r.client, types.NodeName(node.Name), podCIDR); err == nil {
glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
return nil
}
}
glog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
// We accept the fact that we may leak CIDRs here. This is safer than releasing
// them when we don't know whether the request went through.
// A NodeController restart will return all falsely allocated CIDRs to the pool.
if !apierrors.IsServerTimeout(err) {
glog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", node.Name, err)
if releaseErr := r.cidrs.Release(data.cidr); releaseErr != nil {
glog.Errorf("Error releasing allocated CIDR for node %v: %v", node.Name, releaseErr)
}
}
return err
}
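For orientation, the sketch below shows one plausible way to wire this allocator into a controller process. It is not part of the vendored file: the CIDR values, the /24 per-node mask size and the in-cluster client configuration are illustrative assumptions, error handling is collapsed to panics for brevity, and the client-go calls follow the API of the era vendored here (no context arguments).

package main

import (
	"net"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
)

func main() {
	// Assumes the process runs inside the cluster; any rest.Config works here.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 0)
	nodeInformer := factory.Core().V1().Nodes()

	_, clusterCIDR, _ := net.ParseCIDR("10.244.0.0/16")
	_, serviceCIDR, _ := net.ParseCIDR("10.96.0.0/12")

	// List existing nodes so CIDRs that are already assigned get occupied
	// up front, as the constructor's comment requires outside of tests.
	nodeList, err := client.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}

	alloc, err := ipam.NewCIDRRangeAllocator(client, nodeInformer, clusterCIDR, serviceCIDR, 24, nodeList)
	if err != nil {
		panic(err)
	}

	stopCh := make(chan struct{})
	factory.Start(stopCh)
	go alloc.Run(stopCh)
	<-stopCh
}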


@@ -0,0 +1,432 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipam
import (
"net"
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/testutil"
)
const (
nodePollInterval = 100 * time.Millisecond
)
var alwaysReady = func() bool { return true }
func waitForUpdatedNodeWithTimeout(nodeHandler *testutil.FakeNodeHandler, number int, timeout time.Duration) error {
return wait.Poll(nodePollInterval, timeout, func() (bool, error) {
if len(nodeHandler.GetUpdatedNodesCopy()) >= number {
return true, nil
}
return false, nil
})
}
// Creates a fakeNodeInformer using the provided fakeNodeHandler.
func getFakeNodeInformer(fakeNodeHandler *testutil.FakeNodeHandler) coreinformers.NodeInformer {
fakeClient := &fake.Clientset{}
fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc())
fakeNodeInformer := fakeInformerFactory.Core().V1().Nodes()
for _, node := range fakeNodeHandler.Existing {
fakeNodeInformer.Informer().GetStore().Add(node)
}
return fakeNodeInformer
}
func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
testCases := []struct {
description string
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
expectedAllocatedCIDR string
allocatedCIDRs []string
}{
{
description: "When there's no ServiceCIDR return first CIDR in range",
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24")
return clusterCIDR
}(),
serviceCIDR: nil,
subNetMaskSize: 30,
expectedAllocatedCIDR: "127.123.234.0/30",
},
{
description: "Correctly filter out ServiceCIDR",
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24")
return clusterCIDR
}(),
serviceCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/26")
return clusterCIDR
}(),
subNetMaskSize: 30,
// it should return first /30 CIDR after service range
expectedAllocatedCIDR: "127.123.234.64/30",
},
{
description: "Correctly ignore already allocated CIDRs",
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24")
return clusterCIDR
}(),
serviceCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/26")
return clusterCIDR
}(),
subNetMaskSize: 30,
allocatedCIDRs: []string{"127.123.234.64/30", "127.123.234.68/30", "127.123.234.72/30", "127.123.234.80/30"},
expectedAllocatedCIDR: "127.123.234.76/30",
},
}
testFunc := func(tc struct {
description string
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
expectedAllocatedCIDR string
allocatedCIDRs []string
}) {
// Initialize the range allocator.
allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDR, tc.serviceCIDR, tc.subNetMaskSize, nil)
rangeAllocator, ok := allocator.(*rangeAllocator)
if !ok {
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
return
}
rangeAllocator.nodesSynced = alwaysReady
rangeAllocator.recorder = testutil.NewFakeRecorder()
go allocator.Run(wait.NeverStop)
// this is a bit of white box testing
for _, allocated := range tc.allocatedCIDRs {
_, cidr, err := net.ParseCIDR(allocated)
if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
}
if err = rangeAllocator.cidrs.Occupy(cidr); err != nil {
t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
}
}
if err := allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err != nil {
t.Errorf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
}
if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
}
found := false
seenCIDRs := []string{}
for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() {
seenCIDRs = append(seenCIDRs, updatedNode.Spec.PodCIDR)
if updatedNode.Spec.PodCIDR == tc.expectedAllocatedCIDR {
found = true
break
}
}
if !found {
t.Errorf("%v: Unable to find allocated CIDR %v, found updated Nodes with CIDRs: %v",
tc.description, tc.expectedAllocatedCIDR, seenCIDRs)
}
}
for _, tc := range testCases {
testFunc(tc)
}
}
func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
testCases := []struct {
description string
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
allocatedCIDRs []string
}{
{
description: "When there's no ServiceCIDR return first CIDR in range",
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28")
return clusterCIDR
}(),
serviceCIDR: nil,
subNetMaskSize: 30,
allocatedCIDRs: []string{"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
},
}
testFunc := func(tc struct {
description string
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
allocatedCIDRs []string
}) {
// Initialize the range allocator.
allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDR, tc.serviceCIDR, tc.subNetMaskSize, nil)
rangeAllocator, ok := allocator.(*rangeAllocator)
if !ok {
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
return
}
rangeAllocator.nodesSynced = alwaysReady
rangeAllocator.recorder = testutil.NewFakeRecorder()
go allocator.Run(wait.NeverStop)
// this is a bit of white box testing
for _, allocated := range tc.allocatedCIDRs {
_, cidr, err := net.ParseCIDR(allocated)
if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
}
err = rangeAllocator.cidrs.Occupy(cidr)
if err != nil {
t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
}
}
if err := allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err == nil {
t.Errorf("%v: unexpected success in AllocateOrOccupyCIDR: %v", tc.description, err)
}
// We don't expect any updates, so just sleep for some time
time.Sleep(time.Second)
if len(tc.fakeNodeHandler.GetUpdatedNodesCopy()) != 0 {
t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.GetUpdatedNodesCopy())
}
seenCIDRs := []string{}
for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() {
if updatedNode.Spec.PodCIDR != "" {
seenCIDRs = append(seenCIDRs, updatedNode.Spec.PodCIDR)
}
}
if len(seenCIDRs) != 0 {
t.Errorf("%v: Seen assigned CIDRs when not expected: %v",
tc.description, seenCIDRs)
}
}
for _, tc := range testCases {
testFunc(tc)
}
}
func TestReleaseCIDRSuccess(t *testing.T) {
testCases := []struct {
description string
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
expectedAllocatedCIDRFirstRound string
expectedAllocatedCIDRSecondRound string
allocatedCIDRs []string
cidrsToRelease []string
}{
{
description: "Correctly release preallocated CIDR",
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28")
return clusterCIDR
}(),
serviceCIDR: nil,
subNetMaskSize: 30,
allocatedCIDRs: []string{"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
expectedAllocatedCIDRFirstRound: "",
cidrsToRelease: []string{"127.123.234.4/30"},
expectedAllocatedCIDRSecondRound: "127.123.234.4/30",
},
{
description: "Correctly recycle CIDR",
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28")
return clusterCIDR
}(),
serviceCIDR: nil,
subNetMaskSize: 30,
allocatedCIDRs: []string{"127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
expectedAllocatedCIDRFirstRound: "127.123.234.0/30",
cidrsToRelease: []string{"127.123.234.0/30"},
expectedAllocatedCIDRSecondRound: "127.123.234.0/30",
},
}
testFunc := func(tc struct {
description string
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
expectedAllocatedCIDRFirstRound string
expectedAllocatedCIDRSecondRound string
allocatedCIDRs []string
cidrsToRelease []string
}) {
// Initialize the range allocator.
allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDR, tc.serviceCIDR, tc.subNetMaskSize, nil)
rangeAllocator, ok := allocator.(*rangeAllocator)
if !ok {
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
return
}
rangeAllocator.nodesSynced = alwaysReady
rangeAllocator.recorder = testutil.NewFakeRecorder()
go allocator.Run(wait.NeverStop)
// this is a bit of white box testing
for _, allocated := range tc.allocatedCIDRs {
_, cidr, err := net.ParseCIDR(allocated)
if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
}
err = rangeAllocator.cidrs.Occupy(cidr)
if err != nil {
t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
}
}
err := allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0])
if tc.expectedAllocatedCIDRFirstRound != "" {
if err != nil {
t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
}
if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
}
} else {
if err == nil {
t.Fatalf("%v: unexpected success in AllocateOrOccupyCIDR: %v", tc.description, err)
}
// We don't expect any updates here
time.Sleep(time.Second)
if len(tc.fakeNodeHandler.GetUpdatedNodesCopy()) != 0 {
t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.GetUpdatedNodesCopy())
}
}
for _, cidrToRelease := range tc.cidrsToRelease {
nodeToRelease := v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
}
nodeToRelease.Spec.PodCIDR = cidrToRelease
err = allocator.ReleaseCIDR(&nodeToRelease)
if err != nil {
t.Fatalf("%v: unexpected error in ReleaseCIDR: %v", tc.description, err)
}
}
if err = allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err != nil {
t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
}
if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
}
found := false
seenCIDRs := []string{}
for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() {
seenCIDRs = append(seenCIDRs, updatedNode.Spec.PodCIDR)
if updatedNode.Spec.PodCIDR == tc.expectedAllocatedCIDRSecondRound {
found = true
break
}
}
if !found {
t.Errorf("%v: Unable to find allocated CIDR %v, found updated Nodes with CIDRs: %v",
tc.description, tc.expectedAllocatedCIDRSecondRound, seenCIDRs)
}
}
for _, tc := range testCases {
testFunc(tc)
}
}


@@ -0,0 +1,40 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["sync.go"],
importpath = "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync",
visibility = ["//visibility:public"],
deps = [
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["sync_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
"//pkg/controller/nodeipam/ipam/test:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -0,0 +1,381 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sync
import (
"context"
"fmt"
"net"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
)
const (
// InvalidPodCIDR is the event recorded when a node is found with an
// invalid PodCIDR.
InvalidPodCIDR = "CloudCIDRAllocatorInvalidPodCIDR"
// InvalidModeEvent is the event recorded when the CIDR range cannot be
// sync'd due to the cluster running in the wrong mode.
InvalidModeEvent = "CloudCIDRAllocatorInvalidMode"
// MismatchEvent is the event recorded when the CIDR range allocated in the
// node spec does not match what has been allocated in the cloud.
MismatchEvent = "CloudCIDRAllocatorMismatch"
)
// cloudAlias is the interface to the cloud platform APIs.
type cloudAlias interface {
// Alias returns the IP alias for the node.
Alias(ctx context.Context, nodeName string) (*net.IPNet, error)
// AddAlias adds an alias to the node.
AddAlias(ctx context.Context, nodeName string, cidrRange *net.IPNet) error
}
// kubeAPI is the interface to the Kubernetes APIs.
type kubeAPI interface {
// Node returns the spec for the Node object.
Node(ctx context.Context, name string) (*v1.Node, error)
// UpdateNodePodCIDR updates the PodCIDR in the Node spec.
UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRange *net.IPNet) error
// UpdateNodeNetworkUnavailable updates the network unavailable status for the node.
UpdateNodeNetworkUnavailable(nodeName string, unavailable bool) error
// EmitNodeWarningEvent emits an event for the given node.
EmitNodeWarningEvent(nodeName, reason, fmt string, args ...interface{})
}
// controller is the interface to the controller.
type controller interface {
// ReportResult updates the controller with the result of the latest
// sync operation.
ReportResult(err error)
// ResyncTimeout returns the amount of time to wait before retrying
// a sync with a node.
ResyncTimeout() time.Duration
}
// NodeSyncMode is the mode the cloud CIDR allocator runs in.
type NodeSyncMode string
var (
// SyncFromCloud is the mode that synchronizes the IP allocation from the cloud
// platform to the node.
SyncFromCloud NodeSyncMode = "SyncFromCloud"
// SyncFromCluster is the mode that synchronizes the IP allocation determined
// by the k8s controller to the cloud provider.
SyncFromCluster NodeSyncMode = "SyncFromCluster"
)
// IsValidMode returns true if the given mode is valid.
func IsValidMode(m NodeSyncMode) bool {
switch m {
case SyncFromCloud:
case SyncFromCluster:
default:
return false
}
return true
}
// NodeSync synchronizes the state for a single node in the cluster.
type NodeSync struct {
c controller
cloudAlias cloudAlias
kubeAPI kubeAPI
mode NodeSyncMode
nodeName string
opChan chan syncOp
set *cidrset.CidrSet
}
// New returns a new syncer for a given node.
func New(c controller, cloudAlias cloudAlias, kubeAPI kubeAPI, mode NodeSyncMode, nodeName string, set *cidrset.CidrSet) *NodeSync {
return &NodeSync{
c: c,
cloudAlias: cloudAlias,
kubeAPI: kubeAPI,
mode: mode,
nodeName: nodeName,
opChan: make(chan syncOp, 1),
set: set,
}
}
// Loop runs the sync loop for a given node. done is an optional channel that
// is closed when the Loop() returns.
func (sync *NodeSync) Loop(done chan struct{}) {
glog.V(2).Infof("Starting sync loop for node %q", sync.nodeName)
defer func() {
if done != nil {
close(done)
}
}()
timeout := sync.c.ResyncTimeout()
delayTimer := time.NewTimer(timeout)
glog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout)
for {
select {
case op, more := <-sync.opChan:
if !more {
glog.V(2).Infof("Stopping sync loop")
return
}
sync.c.ReportResult(op.run(sync))
if !delayTimer.Stop() {
<-delayTimer.C
}
case <-delayTimer.C:
glog.V(4).Infof("Running resync for node %q", sync.nodeName)
sync.c.ReportResult((&updateOp{}).run(sync))
}
timeout := sync.c.ResyncTimeout()
delayTimer.Reset(timeout)
glog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout)
}
}
// Update causes an update operation on the given node. If node is nil, then
// the syncer will fetch the node spec from the API server before syncing.
//
// This method is safe to call from multiple goroutines.
func (sync *NodeSync) Update(node *v1.Node) {
sync.opChan <- &updateOp{node}
}
// Delete performs the sync operations necessary to remove the node from the
// IPAM state.
//
// This method is safe to call from multiple goroutines.
func (sync *NodeSync) Delete(node *v1.Node) {
sync.opChan <- &deleteOp{node}
close(sync.opChan)
}
// syncOp is the interface for generic sync operation.
type syncOp interface {
// run the requested sync operation.
run(sync *NodeSync) error
}
// updateOp handles creation and updates of a node.
type updateOp struct {
node *v1.Node
}
func (op *updateOp) String() string {
if op.node == nil {
return fmt.Sprintf("updateOp(nil)")
}
return fmt.Sprintf("updateOp(%q,%v)", op.node.Name, op.node.Spec.PodCIDR)
}
func (op *updateOp) run(sync *NodeSync) error {
glog.V(3).Infof("Running updateOp %+v", op)
ctx := context.Background()
if op.node == nil {
glog.V(3).Infof("Getting node spec for %q", sync.nodeName)
node, err := sync.kubeAPI.Node(ctx, sync.nodeName)
if err != nil {
glog.Errorf("Error getting node %q spec: %v", sync.nodeName, err)
return err
}
op.node = node
}
aliasRange, err := sync.cloudAlias.Alias(ctx, sync.nodeName)
if err != nil {
glog.Errorf("Error getting cloud alias for node %q: %v", sync.nodeName, err)
return err
}
switch {
case op.node.Spec.PodCIDR == "" && aliasRange == nil:
err = op.allocateRange(ctx, sync, op.node)
case op.node.Spec.PodCIDR == "" && aliasRange != nil:
err = op.updateNodeFromAlias(ctx, sync, op.node, aliasRange)
case op.node.Spec.PodCIDR != "" && aliasRange == nil:
err = op.updateAliasFromNode(ctx, sync, op.node)
case op.node.Spec.PodCIDR != "" && aliasRange != nil:
err = op.validateRange(ctx, sync, op.node, aliasRange)
}
return err
}
// validateRange checks that the allocated range and the alias range
// match.
func (op *updateOp) validateRange(ctx context.Context, sync *NodeSync, node *v1.Node, aliasRange *net.IPNet) error {
if node.Spec.PodCIDR != aliasRange.String() {
glog.Errorf("Inconsistency detected between node PodCIDR and node alias (%v != %v)",
node.Spec.PodCIDR, aliasRange)
sync.kubeAPI.EmitNodeWarningEvent(node.Name, MismatchEvent,
"Node.Spec.PodCIDR != cloud alias (%v != %v)", node.Spec.PodCIDR, aliasRange)
// User intervention is required in this case, as this is most likely due
// to the user mucking around with their VM aliases on the side.
} else {
glog.V(4).Infof("Node %q CIDR range %v is matches cloud assignment", node.Name, node.Spec.PodCIDR)
}
return nil
}
// updateNodeFromAlias updates the node from the cloud-allocated alias.
func (op *updateOp) updateNodeFromAlias(ctx context.Context, sync *NodeSync, node *v1.Node, aliasRange *net.IPNet) error {
if sync.mode != SyncFromCloud {
sync.kubeAPI.EmitNodeWarningEvent(node.Name, InvalidModeEvent,
"Cannot sync from cloud in mode %q", sync.mode)
return fmt.Errorf("cannot sync from cloud in mode %q", sync.mode)
}
glog.V(2).Infof("Updating node spec with alias range, node.PodCIDR = %v", aliasRange)
if err := sync.set.Occupy(aliasRange); err != nil {
glog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName)
return err
}
if err := sync.kubeAPI.UpdateNodePodCIDR(ctx, node, aliasRange); err != nil {
glog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, aliasRange, err)
return err
}
glog.V(2).Infof("Node %q PodCIDR set to %v", node.Name, aliasRange)
if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil {
glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
return err
}
glog.V(2).Infof("Updated node %q PodCIDR from cloud alias %v", node.Name, aliasRange)
return nil
}
// updateAliasFromNode updates the cloud alias given the node allocation.
func (op *updateOp) updateAliasFromNode(ctx context.Context, sync *NodeSync, node *v1.Node) error {
if sync.mode != SyncFromCluster {
sync.kubeAPI.EmitNodeWarningEvent(
node.Name, InvalidModeEvent, "Cannot sync to cloud in mode %q", sync.mode)
return fmt.Errorf("cannot sync to cloud in mode %q", sync.mode)
}
_, aliasRange, err := net.ParseCIDR(node.Spec.PodCIDR)
if err != nil {
glog.Errorf("Could not parse PodCIDR (%q) for node %q: %v",
node.Spec.PodCIDR, node.Name, err)
return err
}
if err := sync.set.Occupy(aliasRange); err != nil {
glog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName)
return err
}
if err := sync.cloudAlias.AddAlias(ctx, node.Name, aliasRange); err != nil {
glog.Errorf("Could not add alias %v for node %q: %v", aliasRange, node.Name, err)
return err
}
if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil {
glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
return err
}
glog.V(2).Infof("Updated node %q cloud alias with node spec, node.PodCIDR = %v",
node.Name, node.Spec.PodCIDR)
return nil
}
// allocateRange allocates a new range and updates both the cloud
// platform and the node allocation.
func (op *updateOp) allocateRange(ctx context.Context, sync *NodeSync, node *v1.Node) error {
if sync.mode != SyncFromCluster {
sync.kubeAPI.EmitNodeWarningEvent(node.Name, InvalidModeEvent,
"Cannot allocate CIDRs in mode %q", sync.mode)
return fmt.Errorf("controller cannot allocate CIDRS in mode %q", sync.mode)
}
cidrRange, err := sync.set.AllocateNext()
if err != nil {
return err
}
// If AddAlias returns a hard error, cidrRange will be leaked as there
// is no durable record of the range. The missing space will be
// recovered on the next restart of the controller.
if err := sync.cloudAlias.AddAlias(ctx, node.Name, cidrRange); err != nil {
glog.Errorf("Could not add alias %v for node %q: %v", cidrRange, node.Name, err)
return err
}
if err := sync.kubeAPI.UpdateNodePodCIDR(ctx, node, cidrRange); err != nil {
glog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, cidrRange, err)
return err
}
if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil {
glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
return err
}
glog.V(2).Infof("Allocated PodCIDR %v for node %q", cidrRange, node.Name)
return nil
}
// deleteOp handles deletion of a node.
type deleteOp struct {
node *v1.Node
}
func (op *deleteOp) String() string {
if op.node == nil {
return fmt.Sprintf("deleteOp(nil)")
}
return fmt.Sprintf("deleteOp(%q,%v)", op.node.Name, op.node.Spec.PodCIDR)
}
func (op *deleteOp) run(sync *NodeSync) error {
glog.V(3).Infof("Running deleteOp %+v", op)
if op.node.Spec.PodCIDR == "" {
glog.V(2).Infof("Node %q was deleted, node had no PodCIDR range assigned", op.node.Name)
return nil
}
_, cidrRange, err := net.ParseCIDR(op.node.Spec.PodCIDR)
if err != nil {
glog.Errorf("Deleted node %q has an invalid podCIDR %q: %v",
op.node.Name, op.node.Spec.PodCIDR, err)
sync.kubeAPI.EmitNodeWarningEvent(op.node.Name, InvalidPodCIDR,
"Node %q has an invalid PodCIDR: %q", op.node.Name, op.node.Spec.PodCIDR)
return nil
}
sync.set.Release(cidrRange)
glog.V(2).Infof("Node %q was deleted, releasing CIDR range %v",
op.node.Name, op.node.Spec.PodCIDR)
return nil
}
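To make the lifecycle above concrete, here is a hypothetical sketch written as if it lived in this package, since the cloudAlias, kubeAPI and controller interfaces are unexported. The api parameter stands for any single value implementing all three, much like the fakeAPIs type in the test file that follows; the node name and /24 mask size are illustrative.

// runNodeSyncOnce is an illustrative sketch, not part of the vendored file.
// api must satisfy cloudAlias, kubeAPI and controller (fakeAPIs in
// sync_test.go is one such value).
func runNodeSyncOnce(api interface {
	cloudAlias
	kubeAPI
	controller
}, clusterCIDR *net.IPNet) {
	set, err := cidrset.NewCIDRSet(clusterCIDR, 24)
	if err != nil {
		panic(err)
	}
	ns := New(api, api, api, SyncFromCluster, "node-1", set)

	done := make(chan struct{})
	go ns.Loop(done)

	// Passing nil makes the syncer fetch the node spec from the API server
	// before reconciling it against the cloud alias.
	ns.Update(nil)

	// Delete enqueues a final op (which would release any CIDR recorded on
	// the node) and closes the op channel, which stops Loop and closes done.
	node := &v1.Node{}
	node.Name = "node-1"
	ns.Delete(node)
	<-done
}

The same pattern is exercised more thoroughly by TestNodeSyncUpdate and TestNodeSyncDelete below.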


@@ -0,0 +1,297 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sync
import (
"context"
"fmt"
"net"
"reflect"
"testing"
"time"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"
"k8s.io/api/core/v1"
)
var (
_, clusterCIDRRange, _ = net.ParseCIDR("10.1.0.0/16")
)
type fakeEvent struct {
nodeName string
reason string
}
type fakeAPIs struct {
aliasRange *net.IPNet
aliasErr error
addAliasErr error
nodeRet *v1.Node
nodeErr error
updateNodeErr error
resyncTimeout time.Duration
reportChan chan struct{}
updateNodeNetworkUnavailableErr error
calls []string
events []fakeEvent
results []error
}
func (f *fakeAPIs) Alias(ctx context.Context, nodeName string) (*net.IPNet, error) {
f.calls = append(f.calls, fmt.Sprintf("alias %v", nodeName))
return f.aliasRange, f.aliasErr
}
func (f *fakeAPIs) AddAlias(ctx context.Context, nodeName string, cidrRange *net.IPNet) error {
f.calls = append(f.calls, fmt.Sprintf("addAlias %v %v", nodeName, cidrRange))
return f.addAliasErr
}
func (f *fakeAPIs) Node(ctx context.Context, name string) (*v1.Node, error) {
f.calls = append(f.calls, fmt.Sprintf("node %v", name))
return f.nodeRet, f.nodeErr
}
func (f *fakeAPIs) UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRange *net.IPNet) error {
f.calls = append(f.calls, fmt.Sprintf("updateNode %v", node))
return f.updateNodeErr
}
func (f *fakeAPIs) UpdateNodeNetworkUnavailable(nodeName string, unavailable bool) error {
f.calls = append(f.calls, fmt.Sprintf("updateNodeNetworkUnavailable %v %v", nodeName, unavailable))
return f.updateNodeNetworkUnavailableErr
}
func (f *fakeAPIs) EmitNodeWarningEvent(nodeName, reason, fmtStr string, args ...interface{}) {
f.events = append(f.events, fakeEvent{nodeName, reason})
}
func (f *fakeAPIs) ReportResult(err error) {
glog.V(2).Infof("ReportResult %v", err)
f.results = append(f.results, err)
if f.reportChan != nil {
f.reportChan <- struct{}{}
}
}
func (f *fakeAPIs) ResyncTimeout() time.Duration {
if f.resyncTimeout == 0 {
return time.Second * 10000
}
return f.resyncTimeout
}
func (f *fakeAPIs) dumpTrace() {
for i, x := range f.calls {
glog.Infof("trace %v: %v", i, x)
}
}
var nodeWithoutCIDRRange = &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "node1"},
}
var nodeWithCIDRRange = &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "node1"},
Spec: v1.NodeSpec{PodCIDR: "10.1.1.0/24"},
}
func TestNodeSyncUpdate(t *testing.T) {
t.Parallel()
for _, tc := range []struct {
desc string
mode NodeSyncMode
node *v1.Node
fake fakeAPIs
events []fakeEvent
wantError bool
}{
{
desc: "validate range ==",
mode: SyncFromCloud,
node: nodeWithCIDRRange,
fake: fakeAPIs{
aliasRange: test.MustParseCIDR(nodeWithCIDRRange.Spec.PodCIDR),
},
},
{
desc: "validate range !=",
mode: SyncFromCloud,
node: nodeWithCIDRRange,
fake: fakeAPIs{aliasRange: test.MustParseCIDR("192.168.0.0/24")},
events: []fakeEvent{{"node1", "CloudCIDRAllocatorMismatch"}},
},
{
desc: "update alias from node",
mode: SyncFromCloud,
node: nodeWithCIDRRange,
events: []fakeEvent{{"node1", "CloudCIDRAllocatorInvalidMode"}},
wantError: true,
},
{
desc: "update alias from node",
mode: SyncFromCluster,
node: nodeWithCIDRRange,
// XXX/bowei -- validation
},
{
desc: "update node from alias",
mode: SyncFromCloud,
node: nodeWithoutCIDRRange,
fake: fakeAPIs{aliasRange: test.MustParseCIDR("10.1.2.3/16")},
// XXX/bowei -- validation
},
{
desc: "update node from alias",
mode: SyncFromCluster,
node: nodeWithoutCIDRRange,
fake: fakeAPIs{aliasRange: test.MustParseCIDR("10.1.2.3/16")},
events: []fakeEvent{{"node1", "CloudCIDRAllocatorInvalidMode"}},
wantError: true,
},
{
desc: "allocate range",
mode: SyncFromCloud,
node: nodeWithoutCIDRRange,
events: []fakeEvent{{"node1", "CloudCIDRAllocatorInvalidMode"}},
wantError: true,
},
{
desc: "allocate range",
mode: SyncFromCluster,
node: nodeWithoutCIDRRange,
},
{
desc: "update with node==nil",
mode: SyncFromCluster,
node: nil,
fake: fakeAPIs{
nodeRet: nodeWithCIDRRange,
},
wantError: false,
},
} {
cidr, _ := cidrset.NewCIDRSet(clusterCIDRRange, 24)
sync := New(&tc.fake, &tc.fake, &tc.fake, tc.mode, "node1", cidr)
doneChan := make(chan struct{})
// Do a single step of the loop.
go sync.Loop(doneChan)
sync.Update(tc.node)
close(sync.opChan)
<-doneChan
tc.fake.dumpTrace()
if !reflect.DeepEqual(tc.fake.events, tc.events) {
t.Errorf("%v, %v; fake.events = %#v, want %#v", tc.desc, tc.mode, tc.fake.events, tc.events)
}
var hasError bool
for _, r := range tc.fake.results {
hasError = hasError || (r != nil)
}
if hasError != tc.wantError {
t.Errorf("%v, %v; hasError = %t, errors = %v, want %t",
tc.desc, tc.mode, hasError, tc.fake.events, tc.wantError)
}
}
}
func TestNodeSyncResync(t *testing.T) {
fake := &fakeAPIs{
nodeRet: nodeWithCIDRRange,
resyncTimeout: time.Millisecond,
reportChan: make(chan struct{}),
}
cidr, _ := cidrset.NewCIDRSet(clusterCIDRRange, 24)
sync := New(fake, fake, fake, SyncFromCluster, "node1", cidr)
doneChan := make(chan struct{})
go sync.Loop(doneChan)
<-fake.reportChan
close(sync.opChan)
// Unblock loop().
go func() {
<-fake.reportChan
}()
<-doneChan
fake.dumpTrace()
}
func TestNodeSyncDelete(t *testing.T) {
t.Parallel()
for _, tc := range []struct {
desc string
mode NodeSyncMode
node *v1.Node
fake fakeAPIs
}{
{
desc: "delete",
mode: SyncFromCluster,
node: nodeWithCIDRRange,
},
{
desc: "delete without CIDR range",
mode: SyncFromCluster,
node: nodeWithoutCIDRRange,
},
{
desc: "delete with invalid CIDR range",
mode: SyncFromCluster,
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "node1"},
Spec: v1.NodeSpec{PodCIDR: "invalid"},
},
},
} {
cidr, _ := cidrset.NewCIDRSet(clusterCIDRRange, 24)
sync := New(&tc.fake, &tc.fake, &tc.fake, tc.mode, "node1", cidr)
doneChan := make(chan struct{})
// Do a single step of the loop.
go sync.Loop(doneChan)
sync.Delete(tc.node)
<-doneChan
tc.fake.dumpTrace()
/*
if !reflect.DeepEqual(tc.fake.events, tc.events) {
t.Errorf("%v, %v; fake.events = %#v, want %#v", tc.desc, tc.mode, tc.fake.events, tc.events)
}
var hasError bool
for _, r := range tc.fake.results {
hasError = hasError || (r != nil)
}
if hasError != tc.wantError {
t.Errorf("%v, %v; hasError = %t, errors = %v, want %t",
tc.desc, tc.mode, hasError, tc.fake.events, tc.wantError)
}
*/
}
}


@@ -0,0 +1,22 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["utils.go"],
importpath = "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -0,0 +1,31 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"net"
)
// MustParseCIDR returns the CIDR range parsed from s or panics if the string
// cannot be parsed.
func MustParseCIDR(s string) *net.IPNet {
_, ret, err := net.ParseCIDR(s)
if err != nil {
panic(err)
}
return ret
}


@@ -0,0 +1,67 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipam
import (
"time"
)
// Timeout manages the resync loop timing for a given node sync operation. The
// timeout changes depending on whether or not there was an error reported for
// the operation. Consecutive errors result in exponential backoff, capped at
// MaxBackoff.
type Timeout struct {
// Resync is the default timeout duration when there are no errors.
Resync time.Duration
// MaxBackoff is the maximum timeout when in an error backoff state.
MaxBackoff time.Duration
// InitialRetry is the initial retry interval when an error is reported.
InitialRetry time.Duration
// errs is the count of consecutive errors that have occurred.
errs int
// current is the current backoff timeout.
current time.Duration
}
// Update the timeout with the current error state.
func (b *Timeout) Update(ok bool) {
if ok {
b.errs = 0
b.current = b.Resync
return
}
b.errs++
if b.errs == 1 {
b.current = b.InitialRetry
return
}
b.current *= 2
if b.current >= b.MaxBackoff {
b.current = b.MaxBackoff
}
}
// Next returns the next operation timeout based on the current error state.
func (b *Timeout) Next() time.Duration {
if b.errs == 0 {
return b.Resync
}
return b.current
}
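A short sketch of how this type is meant to drive a resync loop; resyncLoop, syncNode and the duration values are illustrative placeholders rather than anything defined in this package.

// resyncLoop is an illustrative sketch, not part of the vendored file.
// syncNode stands for whatever per-node sync operation may fail.
func resyncLoop(syncNode func() error, stopCh <-chan struct{}) {
	t := &Timeout{
		Resync:       10 * time.Minute,
		MaxBackoff:   5 * time.Minute,
		InitialRetry: 5 * time.Second,
	}
	for {
		// A success resets the interval to Resync; consecutive errors double
		// the retry interval, capped at MaxBackoff.
		t.Update(syncNode() == nil)
		select {
		case <-time.After(t.Next()):
		case <-stopCh:
			return
		}
	}
}

TestTimeout below walks the same state machine step by step.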


@@ -0,0 +1,57 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipam
import (
"errors"
"testing"
"time"
)
func TestTimeout(t *testing.T) {
time10s := 10 * time.Second
time5s := 5 * time.Second
timeout := &Timeout{
Resync: time10s,
MaxBackoff: time5s,
InitialRetry: time.Second,
}
for _, testStep := range []struct {
err error
want time.Duration
}{
{nil, time10s},
{nil, time10s},
{errors.New("x"), time.Second},
{errors.New("x"), 2 * time.Second},
{errors.New("x"), 4 * time.Second},
{errors.New("x"), 5 * time.Second},
{errors.New("x"), 5 * time.Second},
{nil, time10s},
{nil, time10s},
{errors.New("x"), time.Second},
{errors.New("x"), 2 * time.Second},
{nil, time10s},
} {
timeout.Update(testStep.err == nil)
next := timeout.Next()
if next != testStep.want {
t.Errorf("timeout.next(%v) = %v, want %v", testStep.err, next, testStep.want)
}
}
}