vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

vendor/k8s.io/kubernetes/pkg/controller/cloud/BUILD generated vendored Normal file

@@ -0,0 +1,86 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"node_controller.go",
"pvlcontroller.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/cloud",
deps = [
"//pkg/api/v1/node:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/node:go_default_library",
"//plugin/pkg/scheduler/algorithm:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"node_controller_test.go",
"pvlcontroller_test.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/cloud",
library = ":go_default_library",
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/fake:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/testutil:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//plugin/pkg/scheduler/algorithm:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/controller/cloud/OWNERS generated vendored Normal file

@@ -0,0 +1,8 @@
approvers:
- thockin
- luxas
- wlan0
reviewers:
- thockin
- luxas
- wlan0

vendor/k8s.io/kubernetes/pkg/controller/cloud/node_controller.go generated vendored Normal file

@@ -0,0 +1,494 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloud
import (
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
clientretry "k8s.io/client-go/util/retry"
nodeutilv1 "k8s.io/kubernetes/pkg/api/v1/node"
"k8s.io/kubernetes/pkg/cloudprovider"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
)
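// UpdateNodeSpecBackoff is the backoff used when retrying conflicting Node updates
// while initializing a newly added cloud node (about 20 attempts, 50ms base, full jitter).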
var UpdateNodeSpecBackoff = wait.Backoff{
Steps: 20,
Duration: 50 * time.Millisecond,
Jitter: 1.0,
}
type CloudNodeController struct {
nodeInformer coreinformers.NodeInformer
kubeClient clientset.Interface
recorder record.EventRecorder
cloud cloudprovider.Interface
// Value controlling the NodeController monitoring period, i.e. how often the NodeController
// checks the node status posted by the kubelet. This value should be lower than the
// nodeMonitorGracePeriod set in the controller-manager.
nodeMonitorPeriod time.Duration
nodeStatusUpdateFrequency time.Duration
}
const (
// nodeStatusUpdateRetry controls the number of retries of writing NodeStatus update.
nodeStatusUpdateRetry = 5
// The amount of time the nodecontroller should sleep between retrying NodeStatus updates
retrySleepTime = 20 * time.Millisecond
)
// NewCloudNodeController creates a CloudNodeController object
func NewCloudNodeController(
nodeInformer coreinformers.NodeInformer,
kubeClient clientset.Interface,
cloud cloudprovider.Interface,
nodeMonitorPeriod time.Duration,
nodeStatusUpdateFrequency time.Duration) *CloudNodeController {
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"})
eventBroadcaster.StartLogging(glog.Infof)
if kubeClient != nil {
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
} else {
glog.V(0).Infof("No api server defined - no events will be sent to API server.")
}
cnc := &CloudNodeController{
nodeInformer: nodeInformer,
kubeClient: kubeClient,
recorder: recorder,
cloud: cloud,
nodeMonitorPeriod: nodeMonitorPeriod,
nodeStatusUpdateFrequency: nodeStatusUpdateFrequency,
}
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: cnc.AddCloudNode,
})
return cnc
}
// Run starts this controller, which deletes a node if the kubelet stops reporting
// and the node is gone from the cloud provider.
func (cnc *CloudNodeController) Run() {
defer utilruntime.HandleCrash()
// The following loops communicate with the API server with a worst-case complexity
// of O(num_nodes) per cycle. They are acceptable here because they run
// very infrequently. DO NOT MODIFY this to perform frequent operations.
// Start a loop to periodically update the node addresses obtained from the cloud
go wait.Until(cnc.UpdateNodeStatus, cnc.nodeStatusUpdateFrequency, wait.NeverStop)
// Start a loop to periodically check if any nodes have been deleted from cloudprovider
go wait.Until(cnc.MonitorNode, cnc.nodeMonitorPeriod, wait.NeverStop)
}
// UpdateNodeStatus updates the node status, such as node addresses
func (cnc *CloudNodeController) UpdateNodeStatus() {
instances, ok := cnc.cloud.Instances()
if !ok {
utilruntime.HandleError(fmt.Errorf("failed to get instances from cloud provider"))
return
}
nodes, err := cnc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
glog.Errorf("Error monitoring node status: %v", err)
return
}
for i := range nodes.Items {
cnc.updateNodeAddress(&nodes.Items[i], instances)
}
}
// updateNodeAddress updates the addresses of a single node
func (cnc *CloudNodeController) updateNodeAddress(node *v1.Node, instances cloudprovider.Instances) {
// Do not process nodes that are still tainted
cloudTaint := getCloudTaint(node.Spec.Taints)
if cloudTaint != nil {
glog.V(5).Infof("This node %s is still tainted. Will not process.", node.Name)
return
}
// A node that isn't present according to the cloud provider shouldn't have its addresses updated
exists, err := ensureNodeExistsByProviderIDOrExternalID(instances, node)
if err != nil {
// Continue to update the node addresses when we cannot be sure whether the node still exists
glog.Errorf("%v", err)
} else if !exists {
glog.V(4).Infof("The node %s is no longer present according to the cloud provider, do not process.", node.Name)
return
}
nodeAddresses, err := getNodeAddressesByProviderIDOrName(instances, node)
if err != nil {
glog.Errorf("%v", err)
return
}
// Check if a hostname address exists in the cloud provided addresses
hostnameExists := false
for i := range nodeAddresses {
if nodeAddresses[i].Type == v1.NodeHostName {
hostnameExists = true
}
}
// If hostname was not present in cloud provided addresses, use the hostname
// from the existing node (populated by kubelet)
if !hostnameExists {
for _, addr := range node.Status.Addresses {
if addr.Type == v1.NodeHostName {
nodeAddresses = append(nodeAddresses, addr)
}
}
}
// If nodeIP was suggested by user, ensure that
// it can be found in the cloud as well (consistent with the behaviour in kubelet)
if nodeIP, ok := ensureNodeProvidedIPExists(node, nodeAddresses); ok {
if nodeIP == nil {
glog.Errorf("Specified Node IP not found in cloudprovider")
return
}
nodeAddresses = []v1.NodeAddress{*nodeIP}
}
newNode := node.DeepCopy()
newNode.Status.Addresses = nodeAddresses
if !nodeAddressesChangeDetected(node.Status.Addresses, newNode.Status.Addresses) {
return
}
_, err = nodeutil.PatchNodeStatus(cnc.kubeClient.CoreV1(), types.NodeName(node.Name), node, newNode)
if err != nil {
glog.Errorf("Error patching node with cloud ip addresses = [%v]", err)
}
}
// MonitorNode queries the cloud provider for non-ready nodes and deletes them
// if they cannot be found in the cloud provider
func (cnc *CloudNodeController) MonitorNode() {
instances, ok := cnc.cloud.Instances()
if !ok {
utilruntime.HandleError(fmt.Errorf("failed to get instances from cloud provider"))
return
}
nodes, err := cnc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
glog.Errorf("Error monitoring node status: %v", err)
return
}
for i := range nodes.Items {
var currentReadyCondition *v1.NodeCondition
node := &nodes.Items[i]
// Try to get the current node status
// If node status is empty, then kubelet has not posted ready status yet. In this case, process next node
for rep := 0; rep < nodeStatusUpdateRetry; rep++ {
_, currentReadyCondition = nodeutilv1.GetNodeCondition(&node.Status, v1.NodeReady)
if currentReadyCondition != nil {
break
}
name := node.Name
node, err = cnc.kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
glog.Errorf("Failed while getting a Node to retry updating NodeStatus. Probably Node %s was deleted.", name)
break
}
time.Sleep(retrySleepTime)
}
if currentReadyCondition == nil {
glog.Errorf("Update status of Node %v from CloudNodeController exceeds retry count or the Node was deleted.", node.Name)
continue
}
// If the known node status says that Node is NotReady, then check if the node has been removed
// from the cloud provider. If node cannot be found in cloudprovider, then delete the node immediately
if currentReadyCondition != nil {
if currentReadyCondition.Status != v1.ConditionTrue {
// Check with the cloud provider to see if the node still exists. If it
// doesn't, delete the node immediately.
exists, err := ensureNodeExistsByProviderIDOrExternalID(instances, node)
if err != nil {
glog.Errorf("Error getting data for node %s from cloud: %v", node.Name, err)
continue
}
if exists {
// Continue checking the remaining nodes since the current one is fine.
continue
}
glog.V(2).Infof("Deleting node since it is no longer present in cloud provider: %s", node.Name)
ref := &v1.ObjectReference{
Kind: "Node",
Name: node.Name,
UID: types.UID(node.UID),
Namespace: "",
}
glog.V(2).Infof("Recording %s event message for node %s", "DeletingNode", node.Name)
cnc.recorder.Eventf(ref, v1.EventTypeNormal, fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name), "Node %s event: %s", node.Name, "DeletingNode")
go func(nodeName string) {
defer utilruntime.HandleCrash()
if err := cnc.kubeClient.CoreV1().Nodes().Delete(nodeName, nil); err != nil {
glog.Errorf("unable to delete node %q: %v", nodeName, err)
}
}(node.Name)
}
}
}
}
// AddCloudNode processes nodes that were added to the cluster and initializes them with the cloud provider if appropriate
func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {
node := obj.(*v1.Node)
instances, ok := cnc.cloud.Instances()
if !ok {
utilruntime.HandleError(fmt.Errorf("failed to get instances from cloud provider"))
return
}
cloudTaint := getCloudTaint(node.Spec.Taints)
if cloudTaint == nil {
glog.V(2).Infof("This node %s is registered without the cloud taint. Will not process.", node.Name)
return
}
err := clientretry.RetryOnConflict(UpdateNodeSpecBackoff, func() error {
curNode, err := cnc.kubeClient.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil {
return err
}
if curNode.Spec.ProviderID == "" {
providerID, err := cloudprovider.GetInstanceProviderID(cnc.cloud, types.NodeName(curNode.Name))
if err == nil {
curNode.Spec.ProviderID = providerID
} else {
// we should attempt to set providerID on curNode, but
// we can continue if we fail since we will attempt to set
// node addresses given the node name in getNodeAddressesByProviderIDOrName
glog.Errorf("failed to set node provider id: %v", err)
}
}
nodeAddresses, err := getNodeAddressesByProviderIDOrName(instances, curNode)
if err != nil {
glog.Errorf("%v", err)
return nil
}
// If user provided an IP address, ensure that IP address is found
// in the cloud provider before removing the taint on the node
if nodeIP, ok := ensureNodeProvidedIPExists(curNode, nodeAddresses); ok {
if nodeIP == nil {
glog.Errorf("failed to get specified nodeIP in cloudprovider")
return nil
}
}
if instanceType, err := getInstanceTypeByProviderIDOrName(instances, curNode); err != nil {
glog.Errorf("%v", err)
return err
} else if instanceType != "" {
glog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType)
curNode.ObjectMeta.Labels[kubeletapis.LabelInstanceType] = instanceType
}
// TODO(wlan0): Move this logic to the route controller using the node taint instead of condition
// Since there are node taints, do we still need this?
// This condition marks the node as unusable until routes are initialized in the cloud provider
if cnc.cloud.ProviderName() == "gce" {
curNode.Status.Conditions = append(curNode.Status.Conditions, v1.NodeCondition{
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionTrue,
Reason: "NoRouteCreated",
Message: "Node created without a route",
LastTransitionTime: metav1.Now(),
})
}
if zones, ok := cnc.cloud.Zones(); ok {
zone, err := getZoneByProviderIDOrName(zones, curNode)
if err != nil {
return fmt.Errorf("failed to get zone from cloud provider: %v", err)
}
if zone.FailureDomain != "" {
glog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneFailureDomain, zone.FailureDomain)
curNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] = zone.FailureDomain
}
if zone.Region != "" {
glog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneRegion, zone.Region)
curNode.ObjectMeta.Labels[kubeletapis.LabelZoneRegion] = zone.Region
}
}
curNode.Spec.Taints = excludeTaintFromList(curNode.Spec.Taints, *cloudTaint)
_, err = cnc.kubeClient.CoreV1().Nodes().Update(curNode)
if err != nil {
return err
}
// After adding, call updateNodeAddress to set the cloud-provider-reported IP addresses
// so that users do not see any significant delay in IP addresses being filled into the node
cnc.updateNodeAddress(curNode, instances)
return nil
})
if err != nil {
utilruntime.HandleError(err)
return
}
glog.Infof("Successfully initialized node %s with cloud provider", node.Name)
}
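// getCloudTaint returns the external cloud provider taint if it is present in the given list, nil otherwise.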
func getCloudTaint(taints []v1.Taint) *v1.Taint {
for _, taint := range taints {
if taint.Key == algorithm.TaintExternalCloudProvider {
return &taint
}
}
return nil
}
func excludeTaintFromList(taints []v1.Taint, toExclude v1.Taint) []v1.Taint {
newTaints := []v1.Taint{}
for _, taint := range taints {
if toExclude.MatchTaint(&taint) {
continue
}
newTaints = append(newTaints, taint)
}
return newTaints
}
// ensureNodeExistsByProviderIDOrExternalID first checks if the instance exists by the provider id and then by calling external id with node name
func ensureNodeExistsByProviderIDOrExternalID(instances cloudprovider.Instances, node *v1.Node) (bool, error) {
exists, err := instances.InstanceExistsByProviderID(node.Spec.ProviderID)
if err != nil {
providerIDErr := err
_, err = instances.ExternalID(types.NodeName(node.Name))
if err == nil {
return true, nil
}
if err == cloudprovider.InstanceNotFound {
return false, nil
}
return false, fmt.Errorf("InstanceExistsByProviderID: Error fetching by providerID: %v Error fetching by NodeName: %v", providerIDErr, err)
}
return exists, nil
}
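// getNodeAddressesByProviderIDOrName fetches the node addresses by provider ID first,
// falling back to a lookup by node name if that fails.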
func getNodeAddressesByProviderIDOrName(instances cloudprovider.Instances, node *v1.Node) ([]v1.NodeAddress, error) {
nodeAddresses, err := instances.NodeAddressesByProviderID(node.Spec.ProviderID)
if err != nil {
providerIDErr := err
nodeAddresses, err = instances.NodeAddresses(types.NodeName(node.Name))
if err != nil {
return nil, fmt.Errorf("NodeAddress: Error fetching by providerID: %v Error fetching by NodeName: %v", providerIDErr, err)
}
}
return nodeAddresses, nil
}
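// nodeAddressesChangeDetected compares the two address sets by address type and
// reports whether they differ.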
func nodeAddressesChangeDetected(addressSet1, addressSet2 []v1.NodeAddress) bool {
if len(addressSet1) != len(addressSet2) {
return true
}
addressMap1 := map[v1.NodeAddressType]string{}
addressMap2 := map[v1.NodeAddressType]string{}
for i := range addressSet1 {
addressMap1[addressSet1[i].Type] = addressSet1[i].Address
addressMap2[addressSet2[i].Type] = addressSet2[i].Address
}
for k, v := range addressMap1 {
if addressMap2[k] != v {
return true
}
}
return false
}
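// ensureNodeProvidedIPExists reports whether the node carries the kubelet provided-IP
// annotation and, if so, returns the cloud address matching that IP (nil when none matches).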
func ensureNodeProvidedIPExists(node *v1.Node, nodeAddresses []v1.NodeAddress) (*v1.NodeAddress, bool) {
var nodeIP *v1.NodeAddress
nodeIPExists := false
if providedIP, ok := node.ObjectMeta.Annotations[kubeletapis.AnnotationProvidedIPAddr]; ok {
nodeIPExists = true
for i := range nodeAddresses {
if nodeAddresses[i].Address == providedIP {
nodeIP = &nodeAddresses[i]
break
}
}
}
return nodeIP, nodeIPExists
}
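// getInstanceTypeByProviderIDOrName fetches the instance type by provider ID first,
// falling back to a lookup by node name if that fails.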
func getInstanceTypeByProviderIDOrName(instances cloudprovider.Instances, node *v1.Node) (string, error) {
instanceType, err := instances.InstanceTypeByProviderID(node.Spec.ProviderID)
if err != nil {
providerIDErr := err
instanceType, err = instances.InstanceType(types.NodeName(node.Name))
if err != nil {
return "", fmt.Errorf("InstanceType: Error fetching by providerID: %v Error fetching by NodeName: %v", providerIDErr, err)
}
}
return instanceType, err
}
// getZoneByProviderIDOrName will attempt to get the zone of the node using its providerID,
// then its name. If both attempts fail, an error is returned
func getZoneByProviderIDOrName(zones cloudprovider.Zones, node *v1.Node) (cloudprovider.Zone, error) {
zone, err := zones.GetZoneByProviderID(node.Spec.ProviderID)
if err != nil {
providerIDErr := err
zone, err = zones.GetZoneByNodeName(types.NodeName(node.Name))
if err != nil {
return cloudprovider.Zone{}, fmt.Errorf("Zone: Error fetching by providerID: %v Error fetching by NodeName: %v", providerIDErr, err)
}
}
return zone, nil
}
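For orientation, a minimal sketch of wiring this controller up and starting it. The fake clientset and FakeCloud from the test dependencies above stand in for real ones, and the two periods are placeholder values; in a real cluster the controller manager injects actual client, informer, and cloud provider instances instead.

package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"

	fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
	cloudcontroller "k8s.io/kubernetes/pkg/controller/cloud"
)

func main() {
	// Stand-ins for a real clientset and cloud provider (illustration only).
	client := fake.NewSimpleClientset()
	cloud := &fakecloud.FakeCloud{}

	// Shared informer factory supplying the Node informer the controller watches.
	factory := informers.NewSharedInformerFactory(client, 0)
	nodeInformer := factory.Core().V1().Nodes()

	cnc := cloudcontroller.NewCloudNodeController(
		nodeInformer,
		client,
		cloud,
		5*time.Second,  // nodeMonitorPeriod (placeholder)
		10*time.Second, // nodeStatusUpdateFrequency (placeholder)
	)

	stop := make(chan struct{})
	factory.Start(stop)
	cnc.Run() // starts the MonitorNode and UpdateNodeStatus loops

	<-stop // block; close(stop) elsewhere to shut down the informers
}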

File diff suppressed because it is too large

vendor/k8s.io/kubernetes/pkg/controller/cloud/pvlcontroller.go generated vendored Normal file

@@ -0,0 +1,280 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloud
import (
"encoding/json"
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
)
const initializerName = "pvlabel.kubernetes.io"
// PersistentVolumeLabelController handles adding labels to persistent volumes when they are created
type PersistentVolumeLabelController struct {
cloud cloudprovider.Interface
kubeClient kubernetes.Interface
pvlController cache.Controller
pvlIndexer cache.Indexer
volumeLister corelisters.PersistentVolumeLister
syncHandler func(key string) error
// queue is where incoming work is placed to de-dup and to allow "easy" rate limited requeues on errors
queue workqueue.RateLimitingInterface
}
// NewPersistentVolumeLabelController creates a PersistentVolumeLabelController object
func NewPersistentVolumeLabelController(
kubeClient kubernetes.Interface,
cloud cloudprovider.Interface) *PersistentVolumeLabelController {
pvlc := &PersistentVolumeLabelController{
cloud: cloud,
kubeClient: kubeClient,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvLabels"),
}
pvlc.syncHandler = pvlc.addLabels
pvlc.pvlIndexer, pvlc.pvlController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.IncludeUninitialized = true
return kubeClient.CoreV1().PersistentVolumes().List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.IncludeUninitialized = true
return kubeClient.CoreV1().PersistentVolumes().Watch(options)
},
},
&v1.PersistentVolume{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
if err == nil {
pvlc.queue.Add(key)
}
},
},
cache.Indexers{},
)
pvlc.volumeLister = corelisters.NewPersistentVolumeLister(pvlc.pvlIndexer)
return pvlc
}
// Run starts a controller that adds labels to persistent volumes
func (pvlc *PersistentVolumeLabelController) Run(threadiness int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer pvlc.queue.ShutDown()
glog.Infof("Starting PersistentVolumeLabelController")
defer glog.Infof("Shutting down PersistentVolumeLabelController")
go pvlc.pvlController.Run(stopCh)
if !controller.WaitForCacheSync("persistent volume label", stopCh, pvlc.pvlController.HasSynced) {
return
}
// start up your worker threads based on threadiness. Some controllers have multiple kinds of workers
for i := 0; i < threadiness; i++ {
// runWorker will loop until "something bad" happens. The .Until will then rekick the worker
// after one second
go wait.Until(pvlc.runWorker, time.Second, stopCh)
}
// wait until we're told to stop
<-stopCh
}
func (pvlc *PersistentVolumeLabelController) runWorker() {
// hot loop until we're told to stop. processNextWorkItem will automatically wait until there's work
// available, so we don't worry about secondary waits
for pvlc.processNextWorkItem() {
}
}
// processNextWorkItem deals with one key off the queue. It returns false when it's time to quit.
func (pvlc *PersistentVolumeLabelController) processNextWorkItem() bool {
// pull the next work item from queue. It should be a key we use to lookup something in a cache
keyObj, quit := pvlc.queue.Get()
if quit {
return false
}
// you always have to indicate to the queue that you've completed a piece of work
defer pvlc.queue.Done(keyObj)
key := keyObj.(string)
// do your work on the key. This method will contain your "do stuff" logic
err := pvlc.syncHandler(key)
if err == nil {
// if you had no error, tell the queue to stop tracking history for your key. This will
// reset things like failure counts for per-item rate limiting
pvlc.queue.Forget(key)
return true
}
// there was a failure so be sure to report it. This method allows for pluggable error handling
// which can be used for things like cluster-monitoring
utilruntime.HandleError(fmt.Errorf("%v failed with : %v", key, err))
// since we failed, we should requeue the item to work on later. This method will add a backoff
// to avoid hotlooping on particular items (they're probably still not going to work right away)
// and overall controller protection (everything I've done is broken, this controller needs to
// calm down or it can starve other useful work) cases.
pvlc.queue.AddRateLimited(key)
return true
}
// addLabels adds appropriate labels to persistent volumes and sets the
// volume as available if successful.
func (pvlc *PersistentVolumeLabelController) addLabels(key string) error {
_, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return fmt.Errorf("error getting name of volume %q to get volume from informer: %v", key, err)
}
volume, err := pvlc.volumeLister.Get(name)
if errors.IsNotFound(err) {
return nil
} else if err != nil {
return fmt.Errorf("error getting volume %s from informer: %v", name, err)
}
return pvlc.addLabelsToVolume(volume)
}
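// addLabelsToVolume queries the cloud provider for labels when the volume is still pending
// this controller's initializer, then patches the volume with the labels and removes the initializer.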
func (pvlc *PersistentVolumeLabelController) addLabelsToVolume(vol *v1.PersistentVolume) error {
var volumeLabels map[string]string
// Only add labels if in the list of initializers
if needsInitialization(vol.Initializers, initializerName) {
if labeler, ok := (pvlc.cloud).(cloudprovider.PVLabeler); ok {
labels, err := labeler.GetLabelsForVolume(vol)
if err != nil {
return fmt.Errorf("error querying volume %v: %v", vol.Spec, err)
}
volumeLabels = labels
} else {
glog.V(4).Info("cloud provider does not support PVLabeler")
}
return pvlc.updateVolume(vol, volumeLabels)
}
return nil
}
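// createPatch builds a strategic merge patch that adds the given labels to the volume
// and removes this controller's initializer from it.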
func (pvlc *PersistentVolumeLabelController) createPatch(vol *v1.PersistentVolume, volLabels map[string]string) ([]byte, error) {
volName := vol.Name
newVolume := vol.DeepCopyObject().(*v1.PersistentVolume)
if newVolume.Labels == nil {
newVolume.Labels = make(map[string]string)
}
for k, v := range volLabels {
newVolume.Labels[k] = v
}
newVolume.Initializers = removeInitializer(newVolume.Initializers, initializerName)
glog.V(4).Infof("removed initializer on PersistentVolume %s", newVolume.Name)
oldData, err := json.Marshal(vol)
if err != nil {
return nil, fmt.Errorf("failed to marshal old persistentvolume %#v for persistentvolume %q: %v", vol, volName, err)
}
newData, err := json.Marshal(newVolume)
if err != nil {
return nil, fmt.Errorf("failed to marshal new persistentvolume %#v for persistentvolume %q: %v", newVolume, volName, err)
}
patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.PersistentVolume{})
if err != nil {
return nil, fmt.Errorf("failed to create patch for persistentvolume %q: %v", volName, err)
}
return patch, nil
}
func (pvlc *PersistentVolumeLabelController) updateVolume(vol *v1.PersistentVolume, volLabels map[string]string) error {
volName := vol.Name
glog.V(4).Infof("updating PersistentVolume %s", volName)
patchBytes, err := pvlc.createPatch(vol, volLabels)
if err != nil {
return err
}
_, err = pvlc.kubeClient.CoreV1().PersistentVolumes().Patch(string(volName), types.StrategicMergePatchType, patchBytes)
if err != nil {
return fmt.Errorf("failed to update PersistentVolume %s: %v", volName, err)
}
glog.V(4).Infof("updated PersistentVolume %s", volName)
return err
}
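// removeInitializer returns the initializers with the named entry removed; it returns
// nil when no pending initializers remain.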
func removeInitializer(initializers *metav1.Initializers, name string) *metav1.Initializers {
if initializers == nil {
return nil
}
var updated []metav1.Initializer
for _, pending := range initializers.Pending {
if pending.Name != name {
updated = append(updated, pending)
}
}
if len(updated) == len(initializers.Pending) {
return initializers
}
if len(updated) == 0 {
return nil
}
return &metav1.Initializers{Pending: updated}
}
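// needsInitialization reports whether the named initializer is still pending on the object.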
func needsInitialization(initializers *metav1.Initializers, name string) bool {
hasInitializer := false
if initializers != nil {
for _, pending := range initializers.Pending {
if pending.Name == name {
hasInitializer = true
break
}
}
}
return hasInitializer
}
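In the same vein, a minimal sketch of constructing and running the PersistentVolumeLabelController defined above. It reuses the fake clientset and the FakeCloud VolumeLabelMap from the tests that follow as stand-in dependencies; a real deployment would pass the actual client and cloud provider.

package main

import (
	"k8s.io/client-go/kubernetes/fake"

	fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
	cloudcontroller "k8s.io/kubernetes/pkg/controller/cloud"
)

func main() {
	client := fake.NewSimpleClientset()
	// FakeCloud serves volume labels from VolumeLabelMap, as exercised in the tests below.
	cloud := &fakecloud.FakeCloud{
		VolumeLabelMap: map[string]map[string]string{"awsPV": {"a": "1"}},
	}

	pvlc := cloudcontroller.NewPersistentVolumeLabelController(client, cloud)

	stop := make(chan struct{})
	pvlc.Run(5, stop) // 5 workers; blocks until stop is closed
}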

vendor/k8s.io/kubernetes/pkg/controller/cloud/pvlcontroller_test.go generated vendored Normal file

@@ -0,0 +1,188 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloud
import (
"encoding/json"
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
)
func TestCreatePatch(t *testing.T) {
ignoredPV := v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "noncloud",
Initializers: &metav1.Initializers{
Pending: []metav1.Initializer{
{
Name: initializerName,
},
},
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/",
},
},
},
}
awsPV := v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "awsPV",
Initializers: &metav1.Initializers{
Pending: []metav1.Initializer{
{
Name: initializerName,
},
},
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "123",
},
},
},
}
testCases := map[string]struct {
vol v1.PersistentVolume
labels map[string]string
}{
"non-cloud PV": {
vol: ignoredPV,
labels: nil,
},
"no labels": {
vol: awsPV,
labels: nil,
},
"cloudprovider returns nil, nil": {
vol: awsPV,
labels: nil,
},
"cloudprovider labels": {
vol: awsPV,
labels: map[string]string{"a": "1", "b": "2"},
},
}
for d, tc := range testCases {
cloud := &fakecloud.FakeCloud{}
client := fake.NewSimpleClientset()
pvlController := NewPersistentVolumeLabelController(client, cloud)
patch, err := pvlController.createPatch(&tc.vol, tc.labels)
if err != nil {
t.Errorf("%s: createPatch returned err: %v", d, err)
}
obj := &v1.PersistentVolume{}
json.Unmarshal(patch, obj)
if tc.labels != nil {
for k, v := range tc.labels {
if obj.ObjectMeta.Labels[k] != v {
t.Errorf("%s: label %s expected %s got %s", d, k, v, obj.ObjectMeta.Labels[k])
}
}
}
if obj.ObjectMeta.Initializers != nil {
t.Errorf("%s: initializer wasn't removed: %v", d, obj.ObjectMeta.Initializers)
}
}
}
func TestAddLabelsToVolume(t *testing.T) {
pv := v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "awsPV",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "123",
},
},
},
}
testCases := map[string]struct {
vol v1.PersistentVolume
initializers *metav1.Initializers
shouldLabel bool
}{
"PV without initializer": {
vol: pv,
initializers: nil,
shouldLabel: false,
},
"PV with initializer to remove": {
vol: pv,
initializers: &metav1.Initializers{Pending: []metav1.Initializer{{Name: initializerName}}},
shouldLabel: true,
},
"PV with other initializers": {
vol: pv,
initializers: &metav1.Initializers{Pending: []metav1.Initializer{{Name: "OtherInit"}}},
shouldLabel: false,
},
}
for d, tc := range testCases {
labeledCh := make(chan bool, 1)
client := fake.NewSimpleClientset()
client.PrependReactor("patch", "persistentvolumes", func(action core.Action) (handled bool, ret runtime.Object, err error) {
patch := action.(core.PatchActionImpl).GetPatch()
obj := &v1.PersistentVolume{}
json.Unmarshal(patch, obj)
if obj.ObjectMeta.Labels["a"] != "1" {
return false, nil, nil
}
labeledCh <- true
return true, nil, nil
})
fakeCloud := &fakecloud.FakeCloud{
VolumeLabelMap: map[string]map[string]string{"awsPV": {"a": "1"}},
}
pvlController := &PersistentVolumeLabelController{kubeClient: client, cloud: fakeCloud}
tc.vol.ObjectMeta.Initializers = tc.initializers
pvlController.addLabelsToVolume(&tc.vol)
select {
case l := <-labeledCh:
if l != tc.shouldLabel {
t.Errorf("%s: label of pv failed. expected %t got %t", d, tc.shouldLabel, l)
}
case <-time.After(500 * time.Millisecond):
if tc.shouldLabel != false {
t.Errorf("%s: timed out waiting for label notification", d)
}
}
}
}