Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 18:53:35 +00:00)

Commit: Fresh dep ensure

vendor/k8s.io/kubernetes/pkg/controller/nodeipam/BUILD (generated, vendored, 37 lines changed)
@ -3,6 +3,23 @@ package(default_visibility = ["//visibility:public"])
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["node_ipam_controller_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/cloudprovider/providers/gce:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/controller/nodeipam/ipam:go_default_library",
|
||||
"//pkg/controller/testutil:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/informers:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_library(
|
||||
@ -14,20 +31,20 @@ go_library(
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/nodeipam",
|
||||
deps = [
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/controller/nodeipam/ipam:go_default_library",
|
||||
"//pkg/controller/nodeipam/ipam/sync:go_default_library",
|
||||
"//pkg/util/metrics:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//staging/src/k8s.io/cloud-provider:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/BUILD (generated, vendored, 52 lines changed)
@ -20,12 +20,12 @@ go_test(
|
||||
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
|
||||
"//pkg/controller/nodeipam/ipam/test:go_default_library",
|
||||
"//pkg/controller/testutil:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/informers:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@ -43,33 +43,33 @@ go_library(
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/nodeipam/ipam",
|
||||
deps = [
|
||||
"//pkg/api/v1/node:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/cloudprovider/providers/gce:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
|
||||
"//pkg/controller/nodeipam/ipam/sync:go_default_library",
|
||||
"//pkg/controller/util/node:go_default_library",
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/scheduler/api:go_default_library",
|
||||
"//pkg/util/node:go_default_library",
|
||||
"//pkg/util/taints:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//staging/src/k8s.io/cloud-provider:go_default_library",
|
||||
"//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/scheme:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/adapter.go (generated, vendored, 14 lines changed)

@@ -21,7 +21,7 @@ import (
"encoding/json"
"net"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -31,26 +31,26 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme"
"k8s.io/metrics/pkg/client/clientset/versioned/scheme"
)

type adapter struct {
k8s clientset.Interface
cloud *gce.GCECloud
cloud *gce.Cloud

recorder record.EventRecorder
}

func newAdapter(k8s clientset.Interface, cloud *gce.GCECloud) *adapter {
func newAdapter(k8s clientset.Interface, cloud *gce.Cloud) *adapter {
ret := &adapter{
k8s: k8s,
cloud: cloud,
}

broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(glog.Infof)
broadcaster.StartLogging(klog.Infof)
ret.recorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloudCIDRAllocator"})
glog.V(0).Infof("Sending events to api server.")
klog.V(0).Infof("Sending events to api server.")
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{
Interface: k8s.CoreV1().Events(""),
})
@@ -70,7 +70,7 @@ func (a *adapter) Alias(ctx context.Context, nodeName string) (*net.IPNet, error
case 1:
break
default:
glog.Warningf("Node %q has more than one alias assigned (%v), defaulting to the first", nodeName, cidrs)
klog.Warningf("Node %q has more than one alias assigned (%v), defaulting to the first", nodeName, cidrs)
}

_, cidrRange, err := net.ParseCIDR(cidrs[0])
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidr_allocator.go (generated, vendored, 11 lines changed)

@@ -21,7 +21,7 @@ import (
"net"
"time"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -30,7 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
informers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
)

type nodeAndCIDR struct {
@@ -71,7 +71,10 @@ const (
cidrUpdateRetries = 3

// updateRetryTimeout is the time to wait before requeing a failed node for retry
updateRetryTimeout = 100 * time.Millisecond
updateRetryTimeout = 250 * time.Millisecond

// maxUpdateRetryTimeout is the maximum amount of time between timeouts.
maxUpdateRetryTimeout = 5 * time.Second

// updateMaxRetries is the max retries for a failed node
updateMaxRetries = 10
@@ -118,7 +121,7 @@ func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) {
LabelSelector: labels.Everything().String(),
})
if err != nil {
glog.Errorf("Failed to list all nodes: %v", err)
klog.Errorf("Failed to list all nodes: %v", err)
return false, nil
}
return true, nil
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset/BUILD (generated, vendored, 2 lines changed)

@@ -10,7 +10,7 @@ go_test(
name = "go_default_test",
srcs = ["cidr_set_test.go"],
embed = [":go_default_library"],
deps = ["//vendor/github.com/golang/glog:go_default_library"],
deps = ["//vendor/k8s.io/klog:go_default_library"],
)

go_library(
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset/cidr_set_test.go (generated, vendored, 8 lines changed)

@@ -22,7 +22,7 @@ import (
"reflect"
"testing"

"github.com/golang/glog"
"k8s.io/klog"
)

func TestCIDRSetFullyAllocated(t *testing.T) {
@@ -478,17 +478,17 @@ func TestGetBitforCIDR(t *testing.T) {

got, err := cs.getIndexForCIDR(subnetCIDR)
if err == nil && tc.expectErr {
glog.Errorf("expected error but got null for %v", tc.description)
klog.Errorf("expected error but got null for %v", tc.description)
continue
}

if err != nil && !tc.expectErr {
glog.Errorf("unexpected error: %v for %v", err, tc.description)
klog.Errorf("unexpected error: %v for %v", err, tc.description)
continue
}

if got != tc.expectedBit {
glog.Errorf("expected %v, but got %v for %v", tc.expectedBit, got, tc.description)
klog.Errorf("expected %v, but got %v for %v", tc.expectedBit, got, tc.description)
}
}
}
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go (generated, vendored, 85 lines changed)
@ -18,12 +18,14 @@ package ipam
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@ -33,16 +35,15 @@ import (
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
v1node "k8s.io/kubernetes/pkg/api/v1/node"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
|
||||
utilnode "k8s.io/kubernetes/pkg/util/node"
|
||||
utiltaints "k8s.io/kubernetes/pkg/util/taints"
|
||||
)
|
||||
@ -58,7 +59,7 @@ type nodeProcessingInfo struct {
|
||||
// merely takes the assignment and updates the node spec.
|
||||
type cloudCIDRAllocator struct {
|
||||
client clientset.Interface
|
||||
cloud *gce.GCECloud
|
||||
cloud *gce.Cloud
|
||||
|
||||
// nodeLister is able to list/get nodes and is populated by the shared informer passed to
|
||||
// NewCloudCIDRAllocator.
|
||||
@ -83,16 +84,16 @@ var _ CIDRAllocator = (*cloudCIDRAllocator)(nil)
|
||||
// NewCloudCIDRAllocator creates a new cloud CIDR allocator.
|
||||
func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer) (CIDRAllocator, error) {
|
||||
if client == nil {
|
||||
glog.Fatalf("kubeClient is nil when starting NodeController")
|
||||
klog.Fatalf("kubeClient is nil when starting NodeController")
|
||||
}
|
||||
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
glog.V(0).Infof("Sending events to api server.")
|
||||
eventBroadcaster.StartLogging(klog.Infof)
|
||||
klog.V(0).Infof("Sending events to api server.")
|
||||
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
|
||||
|
||||
gceCloud, ok := cloud.(*gce.GCECloud)
|
||||
gceCloud, ok := cloud.(*gce.Cloud)
|
||||
if !ok {
|
||||
err := fmt.Errorf("cloudCIDRAllocator does not support %v provider", cloud.ProviderName())
|
||||
return nil, err
|
||||
@ -116,7 +117,7 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
|
||||
}
|
||||
// Even if PodCIDR is assigned, but NetworkUnavailable condition is
|
||||
// set to true, we need to process the node to set the condition.
|
||||
networkUnavailableTaint := &v1.Taint{Key: algorithm.TaintNodeNetworkUnavailable, Effect: v1.TaintEffectNoSchedule}
|
||||
networkUnavailableTaint := &v1.Taint{Key: schedulerapi.TaintNodeNetworkUnavailable, Effect: v1.TaintEffectNoSchedule}
|
||||
_, cond := v1node.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable)
|
||||
if cond == nil || cond.Status != v1.ConditionFalse || utiltaints.TaintExists(newNode.Spec.Taints, networkUnavailableTaint) {
|
||||
return ca.AllocateOrOccupyCIDR(newNode)
|
||||
@ -126,15 +127,15 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
|
||||
DeleteFunc: nodeutil.CreateDeleteNodeHandler(ca.ReleaseCIDR),
|
||||
})
|
||||
|
||||
glog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName())
|
||||
klog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName())
|
||||
return ca, nil
|
||||
}
|
||||
|
||||
func (ca *cloudCIDRAllocator) Run(stopCh <-chan struct{}) {
|
||||
defer utilruntime.HandleCrash()
|
||||
|
||||
glog.Infof("Starting cloud CIDR allocator")
|
||||
defer glog.Infof("Shutting down cloud CIDR allocator")
|
||||
klog.Infof("Starting cloud CIDR allocator")
|
||||
defer klog.Infof("Shutting down cloud CIDR allocator")
|
||||
|
||||
if !controller.WaitForCacheSync("cidrallocator", stopCh, ca.nodesSynced) {
|
||||
return
|
||||
@ -152,17 +153,22 @@ func (ca *cloudCIDRAllocator) worker(stopChan <-chan struct{}) {
|
||||
select {
|
||||
case workItem, ok := <-ca.nodeUpdateChannel:
|
||||
if !ok {
|
||||
glog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed")
|
||||
klog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed")
|
||||
return
|
||||
}
|
||||
if err := ca.updateCIDRAllocation(workItem); err != nil {
|
||||
if ca.canRetry(workItem) {
|
||||
time.AfterFunc(updateRetryTimeout, func() {
|
||||
if err := ca.updateCIDRAllocation(workItem); err == nil {
|
||||
klog.V(3).Infof("Updated CIDR for %q", workItem)
|
||||
} else {
|
||||
klog.Errorf("Error updating CIDR for %q: %v", workItem, err)
|
||||
if canRetry, timeout := ca.retryParams(workItem); canRetry {
|
||||
klog.V(2).Infof("Retrying update for %q after %v", workItem, timeout)
|
||||
time.AfterFunc(timeout, func() {
|
||||
// Requeue the failed node for update again.
|
||||
ca.nodeUpdateChannel <- workItem
|
||||
})
|
||||
continue
|
||||
}
|
||||
klog.Errorf("Exceeded retry count for %q, dropping from queue", workItem)
|
||||
}
|
||||
ca.removeNodeFromProcessing(workItem)
|
||||
case <-stopChan:
|
||||
@ -181,15 +187,34 @@ func (ca *cloudCIDRAllocator) insertNodeToProcessing(nodeName string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (ca *cloudCIDRAllocator) canRetry(nodeName string) bool {
|
||||
func (ca *cloudCIDRAllocator) retryParams(nodeName string) (bool, time.Duration) {
|
||||
ca.lock.Lock()
|
||||
defer ca.lock.Unlock()
|
||||
count := ca.nodesInProcessing[nodeName].retries + 1
|
||||
|
||||
entry, ok := ca.nodesInProcessing[nodeName]
|
||||
if !ok {
|
||||
klog.Errorf("Cannot get retryParams for %q as entry does not exist", nodeName)
|
||||
return false, 0
|
||||
}
|
||||
|
||||
count := entry.retries + 1
|
||||
if count > updateMaxRetries {
|
||||
return false
|
||||
return false, 0
|
||||
}
|
||||
ca.nodesInProcessing[nodeName].retries = count
|
||||
return true
|
||||
|
||||
return true, nodeUpdateRetryTimeout(count)
|
||||
}
|
||||
|
||||
func nodeUpdateRetryTimeout(count int) time.Duration {
|
||||
timeout := updateRetryTimeout
|
||||
for i := 0; i < count && timeout < maxUpdateRetryTimeout; i++ {
|
||||
timeout *= 2
|
||||
}
|
||||
if timeout > maxUpdateRetryTimeout {
|
||||
timeout = maxUpdateRetryTimeout
|
||||
}
|
||||
return time.Duration(timeout.Nanoseconds()/2 + rand.Int63n(timeout.Nanoseconds()))
|
||||
}
|
||||
|
||||
func (ca *cloudCIDRAllocator) removeNodeFromProcessing(nodeName string) {
|
||||
@ -206,11 +231,11 @@ func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
|
||||
return nil
|
||||
}
|
||||
if !ca.insertNodeToProcessing(node.Name) {
|
||||
glog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
|
||||
klog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Putting node %s into the work queue", node.Name)
|
||||
klog.V(4).Infof("Putting node %s into the work queue", node.Name)
|
||||
ca.nodeUpdateChannel <- node.Name
|
||||
return nil
|
||||
}
|
||||
@ -222,7 +247,7 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
|
||||
if errors.IsNotFound(err) {
|
||||
return nil // node no longer available, skip processing
|
||||
}
|
||||
glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err)
|
||||
klog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -242,11 +267,11 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
|
||||
podCIDR := cidr.String()
|
||||
|
||||
if node.Spec.PodCIDR == podCIDR {
|
||||
glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
|
||||
klog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
|
||||
// We don't return here, in order to set the NetworkUnavailable condition later below.
|
||||
} else {
|
||||
if node.Spec.PodCIDR != "" {
|
||||
glog.Errorf("PodCIDR being reassigned! Node %v spec has %v, but cloud provider has assigned %v", node.Name, node.Spec.PodCIDR, podCIDR)
|
||||
klog.Errorf("PodCIDR being reassigned! Node %v spec has %v, but cloud provider has assigned %v", node.Name, node.Spec.PodCIDR, podCIDR)
|
||||
// We fall through and set the CIDR despite this error. This
|
||||
// implements the same logic as implemented in the
|
||||
// rangeAllocator.
|
||||
@ -255,14 +280,14 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
|
||||
}
|
||||
for i := 0; i < cidrUpdateRetries; i++ {
|
||||
if err = utilnode.PatchNodeCIDR(ca.client, types.NodeName(node.Name), podCIDR); err == nil {
|
||||
glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
|
||||
klog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed")
|
||||
glog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
|
||||
klog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -274,13 +299,13 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
|
||||
LastTransitionTime: metav1.Now(),
|
||||
})
|
||||
if err != nil {
|
||||
glog.Errorf("Error setting route status for node %v: %v", node.Name, err)
|
||||
klog.Errorf("Error setting route status for node %v: %v", node.Name, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (ca *cloudCIDRAllocator) ReleaseCIDR(node *v1.Node) error {
|
||||
glog.V(2).Infof("Node %v PodCIDR (%v) will be released by external cloud provider (not managed by controller)",
|
||||
klog.V(2).Infof("Node %v PodCIDR (%v) will be released by external cloud provider (not managed by controller)",
|
||||
node.Name, node.Spec.PodCIDR)
|
||||
return nil
|
||||
}
|
||||
|
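The cloud_cidr_allocator.go change above replaces the fixed canRetry check with retryParams and a jittered exponential backoff computed by nodeUpdateRetryTimeout. The following is a self-contained sketch of that backoff behaviour: the function body is the one shown in the diff, the 250 ms base and 5 s cap come from cidr_allocator.go above, and the main wrapper exists only for illustration.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

const (
	updateRetryTimeout    = 250 * time.Millisecond // base delay, as in the diff
	maxUpdateRetryTimeout = 5 * time.Second        // cap, as in the diff
)

// nodeUpdateRetryTimeout doubles the base delay once per retry up to the cap,
// then returns a value jittered between 50% and 150% of that delay.
func nodeUpdateRetryTimeout(count int) time.Duration {
	timeout := updateRetryTimeout
	for i := 0; i < count && timeout < maxUpdateRetryTimeout; i++ {
		timeout *= 2
	}
	if timeout > maxUpdateRetryTimeout {
		timeout = maxUpdateRetryTimeout
	}
	return time.Duration(timeout.Nanoseconds()/2 + rand.Int63n(timeout.Nanoseconds()))
}

func main() {
	// Print a few sample delays; jitter makes each run differ.
	for count := 0; count <= 5; count++ {
		fmt.Printf("retry %d: ~%v\n", count, nodeUpdateRetryTimeout(count))
	}
}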
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cloud_cidr_allocator_test.go (generated, vendored, 24 lines changed)

@@ -17,6 +17,7 @@ limitations under the License.
package ipam

import (
"fmt"
"testing"
"time"

@@ -57,3 +58,26 @@ func TestBoundedRetries(t *testing.T) {
// wait for node to finish processing (should terminate and not time out)
}
}

func withinExpectedRange(got time.Duration, expected time.Duration) bool {
return got >= expected/2 && got <= 3*expected/2
}

func TestNodeUpdateRetryTimeout(t *testing.T) {
for _, tc := range []struct {
count int
want time.Duration
}{
{count: 0, want: 250 * time.Millisecond},
{count: 1, want: 500 * time.Millisecond},
{count: 2, want: 1000 * time.Millisecond},
{count: 3, want: 2000 * time.Millisecond},
{count: 50, want: 5000 * time.Millisecond},
} {
t.Run(fmt.Sprintf("count %d", tc.count), func(t *testing.T) {
if got := nodeUpdateRetryTimeout(tc.count); !withinExpectedRange(got, tc.want) {
t.Errorf("nodeUpdateRetryTimeout(tc.count) = %v; want %v", got, tc.want)
}
})
}
}
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/controller.go (generated, vendored, 18 lines changed)
@ -22,13 +22,13 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
informers "k8s.io/client-go/informers/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
|
||||
nodesync "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync"
|
||||
@ -71,7 +71,7 @@ func NewController(
|
||||
return nil, fmt.Errorf("invalid IPAM controller mode %q", config.Mode)
|
||||
}
|
||||
|
||||
gceCloud, ok := cloud.(*gce.GCECloud)
|
||||
gceCloud, ok := cloud.(*gce.Cloud)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cloud IPAM controller does not support %q provider", cloud.ProviderName())
|
||||
}
|
||||
@ -99,7 +99,7 @@ func NewController(
|
||||
// registers the informers for node changes. This will start synchronization
|
||||
// of the node and cloud CIDR range allocations.
|
||||
func (c *Controller) Start(nodeInformer informers.NodeInformer) error {
|
||||
glog.V(0).Infof("Starting IPAM controller (config=%+v)", c.config)
|
||||
klog.V(0).Infof("Starting IPAM controller (config=%+v)", c.config)
|
||||
|
||||
nodes, err := listNodes(c.adapter.k8s)
|
||||
if err != nil {
|
||||
@ -110,9 +110,9 @@ func (c *Controller) Start(nodeInformer informers.NodeInformer) error {
|
||||
_, cidrRange, err := net.ParseCIDR(node.Spec.PodCIDR)
|
||||
if err == nil {
|
||||
c.set.Occupy(cidrRange)
|
||||
glog.V(3).Infof("Occupying CIDR for node %q (%v)", node.Name, node.Spec.PodCIDR)
|
||||
klog.V(3).Infof("Occupying CIDR for node %q (%v)", node.Name, node.Spec.PodCIDR)
|
||||
} else {
|
||||
glog.Errorf("Node %q has an invalid CIDR (%q): %v", node.Name, node.Spec.PodCIDR, err)
|
||||
klog.Errorf("Node %q has an invalid CIDR (%q): %v", node.Name, node.Spec.PodCIDR, err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -180,7 +180,7 @@ func (c *Controller) onAdd(node *v1.Node) error {
|
||||
c.syncers[node.Name] = syncer
|
||||
go syncer.Loop(nil)
|
||||
} else {
|
||||
glog.Warningf("Add for node %q that already exists", node.Name)
|
||||
klog.Warningf("Add for node %q that already exists", node.Name)
|
||||
}
|
||||
syncer.Update(node)
|
||||
|
||||
@ -194,7 +194,7 @@ func (c *Controller) onUpdate(_, node *v1.Node) error {
|
||||
if sync, ok := c.syncers[node.Name]; ok {
|
||||
sync.Update(node)
|
||||
} else {
|
||||
glog.Errorf("Received update for non-existent node %q", node.Name)
|
||||
klog.Errorf("Received update for non-existent node %q", node.Name)
|
||||
return fmt.Errorf("unknown node %q", node.Name)
|
||||
}
|
||||
|
||||
@ -209,7 +209,7 @@ func (c *Controller) onDelete(node *v1.Node) error {
|
||||
syncer.Delete(node)
|
||||
delete(c.syncers, node.Name)
|
||||
} else {
|
||||
glog.Warning("Node %q was already deleted", node.Name)
|
||||
klog.Warningf("Node %q was already deleted", node.Name)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/range_allocator.go (generated, vendored, 44 lines changed)
@ -21,7 +21,7 @@ import (
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
@ -69,13 +69,13 @@ type rangeAllocator struct {
|
||||
// can initialize its CIDR map. NodeList is only nil in testing.
|
||||
func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.NodeInformer, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
|
||||
if client == nil {
|
||||
glog.Fatalf("kubeClient is nil when starting NodeController")
|
||||
klog.Fatalf("kubeClient is nil when starting NodeController")
|
||||
}
|
||||
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
glog.V(0).Infof("Sending events to api server.")
|
||||
eventBroadcaster.StartLogging(klog.Infof)
|
||||
klog.V(0).Infof("Sending events to api server.")
|
||||
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
|
||||
|
||||
set, err := cidrset.NewCIDRSet(clusterCIDR, subNetMaskSize)
|
||||
@ -96,16 +96,16 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
|
||||
if serviceCIDR != nil {
|
||||
ra.filterOutServiceRange(serviceCIDR)
|
||||
} else {
|
||||
glog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.")
|
||||
klog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.")
|
||||
}
|
||||
|
||||
if nodeList != nil {
|
||||
for _, node := range nodeList.Items {
|
||||
if node.Spec.PodCIDR == "" {
|
||||
glog.Infof("Node %v has no CIDR, ignoring", node.Name)
|
||||
klog.Infof("Node %v has no CIDR, ignoring", node.Name)
|
||||
continue
|
||||
} else {
|
||||
glog.Infof("Node %v has CIDR %s, occupying it in CIDR map",
|
||||
klog.Infof("Node %v has CIDR %s, occupying it in CIDR map",
|
||||
node.Name, node.Spec.PodCIDR)
|
||||
}
|
||||
if err := ra.occupyCIDR(&node); err != nil {
|
||||
@ -154,8 +154,8 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
|
||||
func (r *rangeAllocator) Run(stopCh <-chan struct{}) {
|
||||
defer utilruntime.HandleCrash()
|
||||
|
||||
glog.Infof("Starting range CIDR allocator")
|
||||
defer glog.Infof("Shutting down range CIDR allocator")
|
||||
klog.Infof("Starting range CIDR allocator")
|
||||
defer klog.Infof("Shutting down range CIDR allocator")
|
||||
|
||||
if !controller.WaitForCacheSync("cidrallocator", stopCh, r.nodesSynced) {
|
||||
return
|
||||
@ -173,7 +173,7 @@ func (r *rangeAllocator) worker(stopChan <-chan struct{}) {
|
||||
select {
|
||||
case workItem, ok := <-r.nodeCIDRUpdateChannel:
|
||||
if !ok {
|
||||
glog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed")
|
||||
klog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed")
|
||||
return
|
||||
}
|
||||
if err := r.updateCIDRAllocation(workItem); err != nil {
|
||||
@ -225,7 +225,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
|
||||
return nil
|
||||
}
|
||||
if !r.insertNodeToProcessing(node.Name) {
|
||||
glog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
|
||||
klog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
|
||||
return nil
|
||||
}
|
||||
if node.Spec.PodCIDR != "" {
|
||||
@ -238,7 +238,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
|
||||
return fmt.Errorf("failed to allocate cidr: %v", err)
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Putting node %s with CIDR %s into the work queue", node.Name, podCIDR)
|
||||
klog.V(4).Infof("Putting node %s with CIDR %s into the work queue", node.Name, podCIDR)
|
||||
r.nodeCIDRUpdateChannel <- nodeAndCIDR{
|
||||
nodeName: node.Name,
|
||||
cidr: podCIDR,
|
||||
@ -255,7 +255,7 @@ func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
|
||||
return fmt.Errorf("Failed to parse CIDR %s on Node %v: %v", node.Spec.PodCIDR, node.Name, err)
|
||||
}
|
||||
|
||||
glog.V(4).Infof("release CIDR %s", node.Spec.PodCIDR)
|
||||
klog.V(4).Infof("release CIDR %s", node.Spec.PodCIDR)
|
||||
if err = r.cidrs.Release(podCIDR); err != nil {
|
||||
return fmt.Errorf("Error when releasing CIDR %v: %v", node.Spec.PodCIDR, err)
|
||||
}
|
||||
@ -275,7 +275,7 @@ func (r *rangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) {
|
||||
}
|
||||
|
||||
if err := r.cidrs.Occupy(serviceCIDR); err != nil {
|
||||
glog.Errorf("Error filtering out service cidr %v: %v", serviceCIDR, err)
|
||||
klog.Errorf("Error filtering out service cidr %v: %v", serviceCIDR, err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -289,37 +289,37 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
|
||||
|
||||
node, err = r.nodeLister.Get(data.nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", data.nodeName, err)
|
||||
klog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", data.nodeName, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if node.Spec.PodCIDR == podCIDR {
|
||||
glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
|
||||
klog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
|
||||
return nil
|
||||
}
|
||||
if node.Spec.PodCIDR != "" {
|
||||
glog.Errorf("Node %v already has a CIDR allocated %v. Releasing the new one %v.", node.Name, node.Spec.PodCIDR, podCIDR)
|
||||
klog.Errorf("Node %v already has a CIDR allocated %v. Releasing the new one %v.", node.Name, node.Spec.PodCIDR, podCIDR)
|
||||
if err := r.cidrs.Release(data.cidr); err != nil {
|
||||
glog.Errorf("Error when releasing CIDR %v", podCIDR)
|
||||
klog.Errorf("Error when releasing CIDR %v", podCIDR)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// If we reached here, it means that the node has no CIDR currently assigned. So we set it.
|
||||
for i := 0; i < cidrUpdateRetries; i++ {
|
||||
if err = utilnode.PatchNodeCIDR(r.client, types.NodeName(node.Name), podCIDR); err == nil {
|
||||
glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
|
||||
klog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
glog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
|
||||
klog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
|
||||
nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
|
||||
// We accept the fact that we may leak CIDRs here. This is safer than releasing
|
||||
// them in case when we don't know if request went through.
|
||||
// NodeController restart will return all falsely allocated CIDRs to the pool.
|
||||
if !apierrors.IsServerTimeout(err) {
|
||||
glog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", node.Name, err)
|
||||
klog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", node.Name, err)
|
||||
if releaseErr := r.cidrs.Release(data.cidr); releaseErr != nil {
|
||||
glog.Errorf("Error releasing allocated CIDR for node %v: %v", node.Name, releaseErr)
|
||||
klog.Errorf("Error releasing allocated CIDR for node %v: %v", node.Name, releaseErr)
|
||||
}
|
||||
}
|
||||
return err
|
||||
|
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync/BUILD (generated, vendored, 10 lines changed)

@@ -7,8 +7,8 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -19,9 +19,9 @@ go_test(
deps = [
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
"//pkg/controller/nodeipam/ipam/test:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync/sync.go (generated, vendored, 64 lines changed)
@ -22,7 +22,7 @@ import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
|
||||
@ -120,7 +120,7 @@ func New(c controller, cloudAlias cloudAlias, kubeAPI kubeAPI, mode NodeSyncMode
|
||||
// Loop runs the sync loop for a given node. done is an optional channel that
|
||||
// is closed when the Loop() returns.
|
||||
func (sync *NodeSync) Loop(done chan struct{}) {
|
||||
glog.V(2).Infof("Starting sync loop for node %q", sync.nodeName)
|
||||
klog.V(2).Infof("Starting sync loop for node %q", sync.nodeName)
|
||||
|
||||
defer func() {
|
||||
if done != nil {
|
||||
@ -130,13 +130,13 @@ func (sync *NodeSync) Loop(done chan struct{}) {
|
||||
|
||||
timeout := sync.c.ResyncTimeout()
|
||||
delayTimer := time.NewTimer(timeout)
|
||||
glog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout)
|
||||
klog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout)
|
||||
|
||||
for {
|
||||
select {
|
||||
case op, more := <-sync.opChan:
|
||||
if !more {
|
||||
glog.V(2).Infof("Stopping sync loop")
|
||||
klog.V(2).Infof("Stopping sync loop")
|
||||
return
|
||||
}
|
||||
sync.c.ReportResult(op.run(sync))
|
||||
@ -144,13 +144,13 @@ func (sync *NodeSync) Loop(done chan struct{}) {
|
||||
<-delayTimer.C
|
||||
}
|
||||
case <-delayTimer.C:
|
||||
glog.V(4).Infof("Running resync for node %q", sync.nodeName)
|
||||
klog.V(4).Infof("Running resync for node %q", sync.nodeName)
|
||||
sync.c.ReportResult((&updateOp{}).run(sync))
|
||||
}
|
||||
|
||||
timeout := sync.c.ResyncTimeout()
|
||||
delayTimer.Reset(timeout)
|
||||
glog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout)
|
||||
klog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout)
|
||||
}
|
||||
}
|
||||
|
||||
@ -190,15 +190,15 @@ func (op *updateOp) String() string {
|
||||
}
|
||||
|
||||
func (op *updateOp) run(sync *NodeSync) error {
|
||||
glog.V(3).Infof("Running updateOp %+v", op)
|
||||
klog.V(3).Infof("Running updateOp %+v", op)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if op.node == nil {
|
||||
glog.V(3).Infof("Getting node spec for %q", sync.nodeName)
|
||||
klog.V(3).Infof("Getting node spec for %q", sync.nodeName)
|
||||
node, err := sync.kubeAPI.Node(ctx, sync.nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("Error getting node %q spec: %v", sync.nodeName, err)
|
||||
klog.Errorf("Error getting node %q spec: %v", sync.nodeName, err)
|
||||
return err
|
||||
}
|
||||
op.node = node
|
||||
@ -206,7 +206,7 @@ func (op *updateOp) run(sync *NodeSync) error {
|
||||
|
||||
aliasRange, err := sync.cloudAlias.Alias(ctx, sync.nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("Error getting cloud alias for node %q: %v", sync.nodeName, err)
|
||||
klog.Errorf("Error getting cloud alias for node %q: %v", sync.nodeName, err)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -228,19 +228,19 @@ func (op *updateOp) run(sync *NodeSync) error {
|
||||
// match.
|
||||
func (op *updateOp) validateRange(ctx context.Context, sync *NodeSync, node *v1.Node, aliasRange *net.IPNet) error {
|
||||
if node.Spec.PodCIDR != aliasRange.String() {
|
||||
glog.Errorf("Inconsistency detected between node PodCIDR and node alias (%v != %v)",
|
||||
klog.Errorf("Inconsistency detected between node PodCIDR and node alias (%v != %v)",
|
||||
node.Spec.PodCIDR, aliasRange)
|
||||
sync.kubeAPI.EmitNodeWarningEvent(node.Name, MismatchEvent,
|
||||
"Node.Spec.PodCIDR != cloud alias (%v != %v)", node.Spec.PodCIDR, aliasRange)
|
||||
// User intervention is required in this case, as this is most likely due
|
||||
// to the user mucking around with their VM aliases on the side.
|
||||
} else {
|
||||
glog.V(4).Infof("Node %q CIDR range %v is matches cloud assignment", node.Name, node.Spec.PodCIDR)
|
||||
klog.V(4).Infof("Node %q CIDR range %v is matches cloud assignment", node.Name, node.Spec.PodCIDR)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateNodeFromAlias updates the the node from the cloud allocated
|
||||
// updateNodeFromAlias updates the node from the cloud allocated
|
||||
// alias.
|
||||
func (op *updateOp) updateNodeFromAlias(ctx context.Context, sync *NodeSync, node *v1.Node, aliasRange *net.IPNet) error {
|
||||
if sync.mode != SyncFromCloud {
|
||||
@ -249,26 +249,26 @@ func (op *updateOp) updateNodeFromAlias(ctx context.Context, sync *NodeSync, nod
|
||||
return fmt.Errorf("cannot sync from cloud in mode %q", sync.mode)
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Updating node spec with alias range, node.PodCIDR = %v", aliasRange)
|
||||
klog.V(2).Infof("Updating node spec with alias range, node.PodCIDR = %v", aliasRange)
|
||||
|
||||
if err := sync.set.Occupy(aliasRange); err != nil {
|
||||
glog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName)
|
||||
klog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := sync.kubeAPI.UpdateNodePodCIDR(ctx, node, aliasRange); err != nil {
|
||||
glog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, aliasRange, err)
|
||||
klog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, aliasRange, err)
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Node %q PodCIDR set to %v", node.Name, aliasRange)
|
||||
klog.V(2).Infof("Node %q PodCIDR set to %v", node.Name, aliasRange)
|
||||
|
||||
if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil {
|
||||
glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
|
||||
klog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Updated node %q PodCIDR from cloud alias %v", node.Name, aliasRange)
|
||||
klog.V(2).Infof("Updated node %q PodCIDR from cloud alias %v", node.Name, aliasRange)
|
||||
|
||||
return nil
|
||||
}
|
||||
@ -283,27 +283,27 @@ func (op *updateOp) updateAliasFromNode(ctx context.Context, sync *NodeSync, nod
|
||||
|
||||
_, aliasRange, err := net.ParseCIDR(node.Spec.PodCIDR)
|
||||
if err != nil {
|
||||
glog.Errorf("Could not parse PodCIDR (%q) for node %q: %v",
|
||||
klog.Errorf("Could not parse PodCIDR (%q) for node %q: %v",
|
||||
node.Spec.PodCIDR, node.Name, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := sync.set.Occupy(aliasRange); err != nil {
|
||||
glog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName)
|
||||
klog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := sync.cloudAlias.AddAlias(ctx, node.Name, aliasRange); err != nil {
|
||||
glog.Errorf("Could not add alias %v for node %q: %v", aliasRange, node.Name, err)
|
||||
klog.Errorf("Could not add alias %v for node %q: %v", aliasRange, node.Name, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil {
|
||||
glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
|
||||
klog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Updated node %q cloud alias with node spec, node.PodCIDR = %v",
|
||||
klog.V(2).Infof("Updated node %q cloud alias with node spec, node.PodCIDR = %v",
|
||||
node.Name, node.Spec.PodCIDR)
|
||||
|
||||
return nil
|
||||
@ -326,21 +326,21 @@ func (op *updateOp) allocateRange(ctx context.Context, sync *NodeSync, node *v1.
|
||||
// is no durable record of the range. The missing space will be
|
||||
// recovered on the next restart of the controller.
|
||||
if err := sync.cloudAlias.AddAlias(ctx, node.Name, cidrRange); err != nil {
|
||||
glog.Errorf("Could not add alias %v for node %q: %v", cidrRange, node.Name, err)
|
||||
klog.Errorf("Could not add alias %v for node %q: %v", cidrRange, node.Name, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := sync.kubeAPI.UpdateNodePodCIDR(ctx, node, cidrRange); err != nil {
|
||||
glog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, cidrRange, err)
|
||||
klog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, cidrRange, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil {
|
||||
glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
|
||||
klog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Allocated PodCIDR %v for node %q", cidrRange, node.Name)
|
||||
klog.V(2).Infof("Allocated PodCIDR %v for node %q", cidrRange, node.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
@ -358,15 +358,15 @@ func (op *deleteOp) String() string {
|
||||
}
|
||||
|
||||
func (op *deleteOp) run(sync *NodeSync) error {
|
||||
glog.V(3).Infof("Running deleteOp %+v", op)
|
||||
klog.V(3).Infof("Running deleteOp %+v", op)
|
||||
if op.node.Spec.PodCIDR == "" {
|
||||
glog.V(2).Infof("Node %q was deleted, node had no PodCIDR range assigned", op.node.Name)
|
||||
klog.V(2).Infof("Node %q was deleted, node had no PodCIDR range assigned", op.node.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
_, cidrRange, err := net.ParseCIDR(op.node.Spec.PodCIDR)
|
||||
if err != nil {
|
||||
glog.Errorf("Deleted node %q has an invalid podCIDR %q: %v",
|
||||
klog.Errorf("Deleted node %q has an invalid podCIDR %q: %v",
|
||||
op.node.Name, op.node.Spec.PodCIDR, err)
|
||||
sync.kubeAPI.EmitNodeWarningEvent(op.node.Name, InvalidPodCIDR,
|
||||
"Node %q has an invalid PodCIDR: %q", op.node.Name, op.node.Spec.PodCIDR)
|
||||
@ -374,7 +374,7 @@ func (op *deleteOp) run(sync *NodeSync) error {
|
||||
}
|
||||
|
||||
sync.set.Release(cidrRange)
|
||||
glog.V(2).Infof("Node %q was deleted, releasing CIDR range %v",
|
||||
klog.V(2).Infof("Node %q was deleted, releasing CIDR range %v",
|
||||
op.node.Name, op.node.Spec.PodCIDR)
|
||||
|
||||
return nil
|
||||
|
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync/sync_test.go (generated, vendored, 6 lines changed)

@@ -24,8 +24,8 @@ import (
"testing"
"time"

"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"

@@ -88,7 +88,7 @@ func (f *fakeAPIs) EmitNodeWarningEvent(nodeName, reason, fmtStr string, args ..
}

func (f *fakeAPIs) ReportResult(err error) {
glog.V(2).Infof("ReportResult %v", err)
klog.V(2).Infof("ReportResult %v", err)
f.results = append(f.results, err)
if f.reportChan != nil {
f.reportChan <- struct{}{}
@@ -104,7 +104,7 @@ func (f *fakeAPIs) ResyncTimeout() time.Duration {

func (f *fakeAPIs) dumpTrace() {
for i, x := range f.calls {
glog.Infof("trace %v: %v", i, x)
klog.Infof("trace %v: %v", i, x)
}
}
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/node_ipam_controller.go (generated, vendored, 29 lines changed)
@ -20,7 +20,7 @@ import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
|
||||
@ -32,7 +32,7 @@ import (
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
|
||||
nodesync "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync"
|
||||
@ -90,28 +90,31 @@ func NewNodeIpamController(
|
||||
allocatorType ipam.CIDRAllocatorType) (*Controller, error) {
|
||||
|
||||
if kubeClient == nil {
|
||||
glog.Fatalf("kubeClient is nil when starting Controller")
|
||||
klog.Fatalf("kubeClient is nil when starting Controller")
|
||||
}
|
||||
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
eventBroadcaster.StartLogging(klog.Infof)
|
||||
|
||||
glog.V(0).Infof("Sending events to api server.")
|
||||
klog.Infof("Sending events to api server.")
|
||||
eventBroadcaster.StartRecordingToSink(
|
||||
&v1core.EventSinkImpl{
|
||||
Interface: kubeClient.CoreV1().Events(""),
|
||||
})
|
||||
|
||||
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
|
||||
if kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
|
||||
metrics.RegisterMetricAndTrackRateLimiterUsage("node_ipam_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
|
||||
}
|
||||
|
||||
if clusterCIDR == nil {
|
||||
glog.Fatal("Controller: Must specify --cluster-cidr if --allocate-node-cidrs is set")
|
||||
klog.Fatal("Controller: Must specify --cluster-cidr if --allocate-node-cidrs is set")
|
||||
}
|
||||
mask := clusterCIDR.Mask
|
||||
if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
|
||||
glog.Fatal("Controller: Invalid --cluster-cidr, mask size of cluster CIDR must be less than --node-cidr-mask-size")
|
||||
if allocatorType != ipam.CloudAllocatorType {
|
||||
// Cloud CIDR allocator does not rely on clusterCIDR or nodeCIDRMaskSize for allocation.
|
||||
if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
|
||||
klog.Fatal("Controller: Invalid --cluster-cidr, mask size of cluster CIDR must be less than --node-cidr-mask-size")
|
||||
}
|
||||
}
|
||||
|
||||
ic := &Controller{
|
||||
@ -138,10 +141,10 @@ func NewNodeIpamController(
|
||||
}
|
||||
ipamc, err := ipam.NewController(cfg, kubeClient, cloud, clusterCIDR, serviceCIDR, nodeCIDRMaskSize)
|
||||
if err != nil {
|
||||
glog.Fatalf("Error creating ipam controller: %v", err)
|
||||
klog.Fatalf("Error creating ipam controller: %v", err)
|
||||
}
|
||||
if err := ipamc.Start(nodeInformer); err != nil {
|
||||
glog.Fatalf("Error trying to Init(): %v", err)
|
||||
klog.Fatalf("Error trying to Init(): %v", err)
|
||||
}
|
||||
} else {
|
||||
var err error
|
||||
@ -162,8 +165,8 @@ func NewNodeIpamController(
|
||||
func (nc *Controller) Run(stopCh <-chan struct{}) {
|
||||
defer utilruntime.HandleCrash()
|
||||
|
||||
glog.Infof("Starting ipam controller")
|
||||
defer glog.Infof("Shutting down ipam controller")
|
||||
klog.Infof("Starting ipam controller")
|
||||
defer klog.Infof("Shutting down ipam controller")
|
||||
|
||||
if !controller.WaitForCacheSync("node", stopCh, nc.nodeInformerSynced) {
|
||||
return
|
||||
|
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/node_ipam_controller_test.go (generated, vendored, new file, 102 lines added)
@ -0,0 +1,102 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nodeipam
|
||||
|
||||
import (
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
|
||||
"k8s.io/kubernetes/pkg/controller/testutil"
|
||||
)
|
||||
|
||||
func newTestNodeIpamController(clusterCIDR, serviceCIDR *net.IPNet, nodeCIDRMaskSize int, allocatorType ipam.CIDRAllocatorType) (*Controller, error) {
|
||||
clientSet := fake.NewSimpleClientset()
|
||||
fakeNodeHandler := &testutil.FakeNodeHandler{
|
||||
Existing: []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "node0"}},
|
||||
},
|
||||
Clientset: fake.NewSimpleClientset(),
|
||||
}
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc())
|
||||
fakeNodeInformer := fakeInformerFactory.Core().V1().Nodes()
|
||||
|
||||
for _, node := range fakeNodeHandler.Existing {
|
||||
fakeNodeInformer.Informer().GetStore().Add(node)
|
||||
}
|
||||
|
||||
fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues())
|
||||
return NewNodeIpamController(
|
||||
fakeNodeInformer, fakeGCE, clientSet,
|
||||
clusterCIDR, serviceCIDR, nodeCIDRMaskSize, allocatorType,
|
||||
)
|
||||
}
|
||||
|
||||
// TestNewNodeIpamControllerWithCIDRMasks tests if the controller can be
|
||||
// created with combinations of network CIDRs and masks.
|
||||
func TestNewNodeIpamControllerWithCIDRMasks(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
clusterCIDR string
|
||||
serviceCIDR string
|
||||
maskSize int
|
||||
allocatorType ipam.CIDRAllocatorType
|
||||
wantFatal bool
|
||||
}{
|
||||
{"valid_range_allocator", "10.0.0.0/21", "10.1.0.0/21", 24, ipam.RangeAllocatorType, false},
|
||||
{"valid_cloud_allocator", "10.0.0.0/21", "10.1.0.0/21", 24, ipam.CloudAllocatorType, false},
|
||||
{"valid_ipam_from_cluster", "10.0.0.0/21", "10.1.0.0/21", 24, ipam.IPAMFromClusterAllocatorType, false},
|
||||
{"valid_ipam_from_cloud", "10.0.0.0/21", "10.1.0.0/21", 24, ipam.IPAMFromCloudAllocatorType, false},
|
||||
{"invalid_cluster_CIDR", "invalid", "10.1.0.0/21", 24, ipam.CloudAllocatorType, true},
|
||||
{"valid_CIDR_smaller_than_mask_cloud_allocator", "10.0.0.0/26", "10.1.0.0/21", 24, ipam.CloudAllocatorType, false},
|
||||
{"invalid_CIDR_smaller_than_mask_other_allocators", "10.0.0.0/26", "10.1.0.0/21", 24, ipam.IPAMFromCloudAllocatorType, true},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
_, clusterCIDRIpNet, _ := net.ParseCIDR(tc.clusterCIDR)
|
||||
_, serviceCIDRIpNet, _ := net.ParseCIDR(tc.serviceCIDR)
|
||||
if os.Getenv("EXIT_ON_FATAL") == "1" {
|
||||
// This is the subprocess which runs the actual code.
|
||||
newTestNodeIpamController(clusterCIDRIpNet, serviceCIDRIpNet, tc.maskSize, tc.allocatorType)
|
||||
return
|
||||
}
|
||||
// This is the host process that monitors the exit code of the subprocess.
|
||||
cmd := exec.Command(os.Args[0], "-test.run=TestNewNodeIpamControllerWithCIDRMasks/"+tc.desc)
|
||||
cmd.Env = append(os.Environ(), "EXIT_ON_FATAL=1")
|
||||
err := cmd.Run()
|
||||
var gotFatal bool
|
||||
if err != nil {
|
||||
exitErr, ok := err.(*exec.ExitError)
|
||||
if !ok {
|
||||
t.Fatalf("Failed to run subprocess: %v", err)
|
||||
}
|
||||
gotFatal = !exitErr.Success()
|
||||
}
|
||||
if gotFatal != tc.wantFatal {
|
||||
t.Errorf("newTestNodeIpamController(%v, %v, %v, %v) : gotFatal = %t ; wantFatal = %t", clusterCIDRIpNet, serviceCIDRIpNet, tc.maskSize, tc.allocatorType, gotFatal, tc.wantFatal)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
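The new node_ipam_controller_test.go above asserts on klog.Fatal paths by re-executing the test binary in a subprocess with EXIT_ON_FATAL=1 and inspecting the child's exit code, since a fatal log call exits the process and would otherwise kill the test run. A minimal sketch of the same re-exec pattern follows; fatalExit is a hypothetical stand-in for code under test that calls klog.Fatal.

package fataltest

import (
	"os"
	"os/exec"
	"testing"
)

// fatalExit is a hypothetical stand-in for code that calls klog.Fatal,
// which logs and then exits the process with a non-zero status.
func fatalExit() {
	os.Exit(1)
}

func TestFatalPath(t *testing.T) {
	if os.Getenv("EXIT_ON_FATAL") == "1" {
		// Subprocess: run the code under test; it is expected to exit non-zero.
		fatalExit()
		return
	}
	// Host process: re-run only this test in a child process and check its exit code.
	cmd := exec.Command(os.Args[0], "-test.run=TestFatalPath")
	cmd.Env = append(os.Environ(), "EXIT_ON_FATAL=1")
	err := cmd.Run()
	exitErr, ok := err.(*exec.ExitError)
	if !ok || exitErr.Success() {
		t.Fatalf("expected the subprocess to exit with a fatal error, got %v", err)
	}
}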