vendor update for CSI 0.3.0

gman
2018-07-18 16:47:22 +02:00
parent 6f484f92fc
commit 8ea659f0d5
6810 changed files with 438061 additions and 193861 deletions


@@ -9,6 +9,7 @@ load(
go_test(
name = "go_default_test",
srcs = [
"cloud_cidr_allocator_test.go",
"controller_test.go",
"range_allocator_test.go",
"timeout_test.go",
@@ -48,7 +49,9 @@ go_library(
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
"//pkg/controller/nodeipam/ipam/sync:go_default_library",
"//pkg/controller/util/node:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/util/taints:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",


@@ -52,7 +52,7 @@ func newAdapter(k8s clientset.Interface, cloud *gce.GCECloud) *adapter {
ret.recorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloudCIDRAllocator"})
glog.V(0).Infof("Sending events to api server.")
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{
Interface: v1core.New(k8s.CoreV1().RESTClient()).Events(""),
Interface: k8s.CoreV1().Events(""),
})
return ret
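
For context, the event-sink wiring that this hunk (and the matching ones further down) simplifies looks roughly like the sketch below: the typed clientset's `CoreV1().Events("")` already satisfies the sink interface, so the extra `v1core.New(...RESTClient())` wrapper can be dropped. This is a minimal sketch, not code from the tree; the `newRecorder` helper and the `example` package are illustrative, while the component string mirrors the one used in the hunk above.

```go
package example

import (
	"k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// newRecorder wires an event broadcaster to the API server using the
// typed clientset directly. The helper name is hypothetical; the
// component string mirrors the one used in the diff above.
func newRecorder(client kubernetes.Interface) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{
		// Events("") records events across all namespaces.
		Interface: client.CoreV1().Events(""),
	})
	return broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloudCIDRAllocator"})
}
```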


@@ -69,6 +69,12 @@ const (
// cidrUpdateRetries is the number of times a NodeSpec update will be retried before dropping it.
cidrUpdateRetries = 3
// updateRetryTimeout is the time to wait before requeueing a failed node for retry
updateRetryTimeout = 100 * time.Millisecond
// updateMaxRetries is the max retries for a failed node
updateMaxRetries = 10
)
// CIDRAllocator is an interface implemented by things that know how


@@ -20,13 +20,14 @@ import (
"fmt"
"net"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
informers "k8s.io/client-go/informers/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
@@ -41,9 +42,16 @@ import (
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/controller"
nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
utilnode "k8s.io/kubernetes/pkg/util/node"
utiltaints "k8s.io/kubernetes/pkg/util/taints"
)
// nodeProcessingInfo tracks information related to current nodes in processing
type nodeProcessingInfo struct {
retries int
}
// cloudCIDRAllocator allocates node CIDRs according to IP address aliases
// assigned by the cloud provider. In this case, the allocation and
// deallocation is delegated to the external provider, and the controller
@@ -67,7 +75,7 @@ type cloudCIDRAllocator struct {
// Keep a set of nodes that are currently being processed to avoid races in CIDR allocation
lock sync.Mutex
nodesInProcessing sets.String
nodesInProcessing map[string]*nodeProcessingInfo
}
var _ CIDRAllocator = (*cloudCIDRAllocator)(nil)
@@ -82,7 +90,7 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
eventBroadcaster.StartLogging(glog.Infof)
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.CoreV1().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
gceCloud, ok := cloud.(*gce.GCECloud)
if !ok {
@@ -97,7 +105,7 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
nodesSynced: nodeInformer.Informer().HasSynced,
nodeUpdateChannel: make(chan string, cidrUpdateQueueSize),
recorder: recorder,
nodesInProcessing: sets.NewString(),
nodesInProcessing: map[string]*nodeProcessingInfo{},
}
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -108,8 +116,9 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
}
// Even if PodCIDR is already assigned, we still need to process the node when
// the NetworkUnavailable condition is set to true, so that the condition can be updated.
networkUnavailableTaint := &v1.Taint{Key: algorithm.TaintNodeNetworkUnavailable, Effect: v1.TaintEffectNoSchedule}
_, cond := v1node.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable)
if cond == nil || cond.Status != v1.ConditionFalse {
if cond == nil || cond.Status != v1.ConditionFalse || utiltaints.TaintExists(newNode.Spec.Taints, networkUnavailableTaint) {
return ca.AllocateOrOccupyCIDR(newNode)
}
return nil
@@ -147,9 +156,15 @@ func (ca *cloudCIDRAllocator) worker(stopChan <-chan struct{}) {
return
}
if err := ca.updateCIDRAllocation(workItem); err != nil {
// Requeue the failed node for update again.
ca.nodeUpdateChannel <- workItem
if ca.canRetry(workItem) {
time.AfterFunc(updateRetryTimeout, func() {
// Requeue the failed node for update again.
ca.nodeUpdateChannel <- workItem
})
continue
}
}
ca.removeNodeFromProcessing(workItem)
case <-stopChan:
return
}
@@ -159,17 +174,28 @@ func (ca *cloudCIDRAllocator) worker(stopChan <-chan struct{}) {
func (ca *cloudCIDRAllocator) insertNodeToProcessing(nodeName string) bool {
ca.lock.Lock()
defer ca.lock.Unlock()
if ca.nodesInProcessing.Has(nodeName) {
if _, found := ca.nodesInProcessing[nodeName]; found {
return false
}
ca.nodesInProcessing.Insert(nodeName)
ca.nodesInProcessing[nodeName] = &nodeProcessingInfo{}
return true
}
func (ca *cloudCIDRAllocator) canRetry(nodeName string) bool {
ca.lock.Lock()
defer ca.lock.Unlock()
count := ca.nodesInProcessing[nodeName].retries + 1
if count > updateMaxRetries {
return false
}
ca.nodesInProcessing[nodeName].retries = count
return true
}
func (ca *cloudCIDRAllocator) removeNodeFromProcessing(nodeName string) {
ca.lock.Lock()
defer ca.lock.Unlock()
ca.nodesInProcessing.Delete(nodeName)
delete(ca.nodesInProcessing, nodeName)
}
// WARNING: If you're adding any return calls or defer any more work from this
@@ -191,10 +217,11 @@ func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
// updateCIDRAllocation assigns CIDR to Node and sends an update to the API server.
func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
defer ca.removeNodeFromProcessing(nodeName)
node, err := ca.nodeLister.Get(nodeName)
if err != nil {
if errors.IsNotFound(err) {
return nil // node no longer available, skip processing
}
glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err)
return err
}
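
Taken together, the pieces added above (the nodeProcessingInfo map, canRetry, and the time.AfterFunc requeue in the worker) implement a bounded per-node retry. The following is a condensed, self-contained sketch of the same pattern; the names retryTracker, newRetryTracker, and requeue are illustrative and do not appear in the tree, while the constants copy the values introduced in cidr_allocator.go.

```go
package example

import (
	"sync"
	"time"
)

// Values copied from the constants added in cidr_allocator.go.
const (
	updateRetryTimeout = 100 * time.Millisecond
	updateMaxRetries   = 10
)

// retryTracker condenses the allocator's bookkeeping: a mutex-guarded
// map of per-node retry counts for nodes currently being processed.
type retryTracker struct {
	lock    sync.Mutex
	retries map[string]int
}

func newRetryTracker() *retryTracker {
	return &retryTracker{retries: map[string]int{}}
}

// canRetry mirrors the method added above: it allows at most
// updateMaxRetries requeues for a given node.
func (t *retryTracker) canRetry(node string) bool {
	t.lock.Lock()
	defer t.lock.Unlock()
	count := t.retries[node] + 1
	if count > updateMaxRetries {
		return false
	}
	t.retries[node] = count
	return true
}

// requeue matches the worker's retry path: the failed node is put back
// on the work channel after updateRetryTimeout rather than immediately,
// presumably to avoid hot-looping while the failure persists.
func requeue(ch chan<- string, node string) {
	time.AfterFunc(updateRetryTimeout, func() { ch <- node })
}
```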


@@ -0,0 +1,59 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipam
import (
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
)
func hasNodeInProcessing(ca *cloudCIDRAllocator, name string) bool {
ca.lock.Lock()
defer ca.lock.Unlock()
_, found := ca.nodesInProcessing[name]
return found
}
func TestBoundedRetries(t *testing.T) {
clientSet := fake.NewSimpleClientset()
updateChan := make(chan string, 1) // need to buffer as we are using only one goroutine
stopChan := make(chan struct{})
sharedInfomer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour)
ca := &cloudCIDRAllocator{
client: clientSet,
nodeUpdateChannel: updateChan,
nodeLister: sharedInfomer.Core().V1().Nodes().Lister(),
nodesSynced: sharedInfomer.Core().V1().Nodes().Informer().HasSynced,
nodesInProcessing: map[string]*nodeProcessingInfo{},
}
go ca.worker(stopChan)
nodeName := "testNode"
ca.AllocateOrOccupyCIDR(&v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
},
})
for hasNodeInProcessing(ca, nodeName) {
// wait for node to finish processing (should terminate and not time out)
}
}
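
The test above busy-loops on hasNodeInProcessing until the worker drops the node. If a bounded wait were preferred, a hypothetical helper along the lines below could replace the loop; it is not part of the upstream test and assumes k8s.io/apimachinery/pkg/util/wait is imported.

```go
// waitForProcessingDone is a hypothetical bounded alternative to the
// busy-wait loop in TestBoundedRetries; it polls until the worker has
// removed the node from nodesInProcessing or the timeout expires.
func waitForProcessingDone(t *testing.T, ca *cloudCIDRAllocator, name string) {
	t.Helper()
	err := wait.PollImmediate(10*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
		return !hasNodeInProcessing(ca, name), nil
	})
	if err != nil {
		t.Fatalf("node %q was still being processed: %v", name, err)
	}
}
```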


@@ -174,14 +174,15 @@ func (c *Controller) onAdd(node *v1.Node) error {
c.lock.Lock()
defer c.lock.Unlock()
if syncer, ok := c.syncers[node.Name]; !ok {
syncer, ok := c.syncers[node.Name]
if !ok {
syncer = c.newSyncer(node.Name)
c.syncers[node.Name] = syncer
go syncer.Loop(nil)
} else {
glog.Warningf("Add for node %q that already exists", node.Name)
syncer.Update(node)
}
syncer.Update(node)
return nil
}


@@ -76,7 +76,7 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
eventBroadcaster.StartLogging(glog.Infof)
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.CoreV1().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
set, err := cidrset.NewCIDRSet(clusterCIDR, subNetMaskSize)
if err != nil {